From deafc769b6d0d38c8d152b4ac0b3e5e9f6079ff0 Mon Sep 17 00:00:00 2001
From: bobz965
Date: Fri, 3 Jan 2025 13:51:49 +0800
Subject: [PATCH] remove e2e

Signed-off-by: bobz965
---
 .golangci.yml | 2 -
 go.mod | 85 +-
 go.sum | 219 +--
 test/e2e/framework/cni.go | 75 -
 test/e2e/framework/daemonset.go | 146 --
 test/e2e/framework/deployment.go | 232 ---
 test/e2e/framework/docker/container.go | 87 -
 test/e2e/framework/docker/exec.go | 71 -
 test/e2e/framework/docker/network.go | 150 --
 test/e2e/framework/endpoints.go | 170 --
 test/e2e/framework/event.go | 65 -
 test/e2e/framework/exec_utils.go | 38 -
 test/e2e/framework/expect.go | 223 ---
 test/e2e/framework/framework.go | 252 ---
 test/e2e/framework/image.go | 6 -
 test/e2e/framework/ip.go | 149 --
 test/e2e/framework/ippool.go | 273 ---
 test/e2e/framework/iproute/iproute.go | 178 --
 test/e2e/framework/iptables-dnat.go | 177 --
 test/e2e/framework/iptables-eip.go | 204 ---
 test/e2e/framework/iptables-fip.go | 171 --
 test/e2e/framework/iptables-snat.go | 171 --
 test/e2e/framework/iptables/iptables.go | 53 -
 test/e2e/framework/kind/kind.go | 179 --
 test/e2e/framework/kube-ovn.go | 17 -
 test/e2e/framework/kubectl.go | 38 -
 test/e2e/framework/log.go | 101 --
 test/e2e/framework/namespace.go | 108 --
 .../network-attachment-definition.go | 67 -
 test/e2e/framework/network-policy.go | 82 -
 test/e2e/framework/ovn-dnat.go | 179 --
 test/e2e/framework/ovn-eip.go | 176 --
 test/e2e/framework/ovn-fip.go | 176 --
 test/e2e/framework/ovn-snat.go | 176 --
 test/e2e/framework/pod.go | 144 --
 test/e2e/framework/provider-network.go | 219 ---
 test/e2e/framework/qos-policy.go | 305 ----
 test/e2e/framework/security-group.go | 147 --
 test/e2e/framework/service.go | 177 --
 test/e2e/framework/statefulset.go | 180 --
 test/e2e/framework/subnet.go | 288 ---
 test/e2e/framework/switch-lb-rule.go | 179 --
 test/e2e/framework/util.go | 238 ---
 test/e2e/framework/vip.go | 149 --
 test/e2e/framework/virtual-machine.go | 259 ---
 test/e2e/framework/vlan.go | 96 -
 test/e2e/framework/vpc-nat-gw.go | 224 ---
 test/e2e/framework/vpc.go | 180 --
 test/e2e/framework/wait.go | 76 -
 test/e2e/ha/ha_test.go | 103 --
 test/e2e/iptables-vpc-nat-gw/e2e_test.go | 1406 ---------------
 test/e2e/k8s-network/e2e_test.go | 30 -
 test/e2e/kube-ovn/e2e_test.go | 39 -
 test/e2e/kube-ovn/ipam/ipam.go | 512 ------
 test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go | 253 ---
 .../kube-ovn/network-policy/network-policy.go | 185 --
 test/e2e/kube-ovn/node/node.go | 294 ---
 test/e2e/kube-ovn/pod/pod_recreation.go | 109 --
 test/e2e/kube-ovn/pod/pod_routes.go | 172 --
 test/e2e/kube-ovn/pod/statefulset.go | 67 -
 test/e2e/kube-ovn/pod/vpc_pod_probe.go | 224 ---
 test/e2e/kube-ovn/qos/qos.go | 199 --
 test/e2e/kube-ovn/service/service.go | 205 ---
 test/e2e/kube-ovn/subnet/subnet.go | 1594 -----------------
 .../kube-ovn/switch_lb_rule/switch_lb_rule.go | 356 ----
 test/e2e/kube-ovn/underlay/underlay.go | 1000 -----------
 test/e2e/kubevirt/e2e_test.go | 335 ----
 test/e2e/lb-svc/e2e_test.go | 361 ----
 test/e2e/multus/e2e_test.go | 434 -----
 test/e2e/ovn-ic/e2e_test.go | 312 ----
 test/e2e/ovn-vpc-nat-gw/e2e_test.go | 1041 -----------
 test/e2e/security/e2e_test.go | 158 --
 test/e2e/vip/e2e_test.go | 397 ----
 test/e2e/webhook/e2e_test.go | 32 -
 test/e2e/webhook/pod/pod.go | 117 --
 test/e2e/webhook/subnet/subnet.go | 93 -
 test/e2e/webhook/vip/vip.go | 86 -
 77 files changed, 4 insertions(+), 17467 deletions(-)
 delete mode 100644 test/e2e/framework/cni.go
 delete mode 100644 test/e2e/framework/daemonset.go
 delete mode 100644 test/e2e/framework/deployment.go
 delete mode 100644 test/e2e/framework/docker/container.go
 delete mode 100644 test/e2e/framework/docker/exec.go
 delete mode 100644 test/e2e/framework/docker/network.go
 delete mode 100644 test/e2e/framework/endpoints.go
 delete mode 100644 test/e2e/framework/event.go
 delete mode 100644 test/e2e/framework/exec_utils.go
 delete mode 100644 test/e2e/framework/expect.go
 delete mode 100644 test/e2e/framework/framework.go
 delete mode 100644 test/e2e/framework/image.go
 delete mode 100644 test/e2e/framework/ip.go
 delete mode 100644 test/e2e/framework/ippool.go
 delete mode 100644 test/e2e/framework/iproute/iproute.go
 delete mode 100644 test/e2e/framework/iptables-dnat.go
 delete mode 100644 test/e2e/framework/iptables-eip.go
 delete mode 100644 test/e2e/framework/iptables-fip.go
 delete mode 100644 test/e2e/framework/iptables-snat.go
 delete mode 100644 test/e2e/framework/iptables/iptables.go
 delete mode 100644 test/e2e/framework/kind/kind.go
 delete mode 100644 test/e2e/framework/kube-ovn.go
 delete mode 100644 test/e2e/framework/kubectl.go
 delete mode 100644 test/e2e/framework/log.go
 delete mode 100644 test/e2e/framework/namespace.go
 delete mode 100644 test/e2e/framework/network-attachment-definition.go
 delete mode 100644 test/e2e/framework/network-policy.go
 delete mode 100644 test/e2e/framework/ovn-dnat.go
 delete mode 100644 test/e2e/framework/ovn-eip.go
 delete mode 100644 test/e2e/framework/ovn-fip.go
 delete mode 100644 test/e2e/framework/ovn-snat.go
 delete mode 100644 test/e2e/framework/pod.go
 delete mode 100644 test/e2e/framework/provider-network.go
 delete mode 100644 test/e2e/framework/qos-policy.go
 delete mode 100644 test/e2e/framework/security-group.go
 delete mode 100644 test/e2e/framework/service.go
 delete mode 100644 test/e2e/framework/statefulset.go
 delete mode 100644 test/e2e/framework/subnet.go
 delete mode 100644 test/e2e/framework/switch-lb-rule.go
 delete mode 100644 test/e2e/framework/util.go
 delete mode 100644 test/e2e/framework/vip.go
 delete mode 100644 test/e2e/framework/virtual-machine.go
 delete mode 100644 test/e2e/framework/vlan.go
 delete mode 100644 test/e2e/framework/vpc-nat-gw.go
 delete mode 100644 test/e2e/framework/vpc.go
 delete mode 100644 test/e2e/framework/wait.go
 delete mode 100644 test/e2e/ha/ha_test.go
 delete mode 100644 test/e2e/iptables-vpc-nat-gw/e2e_test.go
 delete mode 100644 test/e2e/k8s-network/e2e_test.go
 delete mode 100644 test/e2e/kube-ovn/e2e_test.go
 delete mode 100644 test/e2e/kube-ovn/ipam/ipam.go
 delete mode 100644 test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go
 delete mode 100644 test/e2e/kube-ovn/network-policy/network-policy.go
 delete mode 100644 test/e2e/kube-ovn/node/node.go
 delete mode 100644 test/e2e/kube-ovn/pod/pod_recreation.go
 delete mode 100644 test/e2e/kube-ovn/pod/pod_routes.go
 delete mode 100644 test/e2e/kube-ovn/pod/statefulset.go
 delete mode 100644 test/e2e/kube-ovn/pod/vpc_pod_probe.go
 delete mode 100644 test/e2e/kube-ovn/qos/qos.go
 delete mode 100644 test/e2e/kube-ovn/service/service.go
 delete mode 100644 test/e2e/kube-ovn/subnet/subnet.go
 delete mode 100644 test/e2e/kube-ovn/switch_lb_rule/switch_lb_rule.go
 delete mode 100644 test/e2e/kube-ovn/underlay/underlay.go
 delete mode 100644 test/e2e/kubevirt/e2e_test.go
 delete mode 100644 test/e2e/lb-svc/e2e_test.go
 delete mode 100644 test/e2e/multus/e2e_test.go
 delete mode 100644 test/e2e/ovn-ic/e2e_test.go
 delete mode 100644 test/e2e/ovn-vpc-nat-gw/e2e_test.go
 delete mode 100644 test/e2e/security/e2e_test.go
 delete mode 100644 test/e2e/vip/e2e_test.go
 delete mode 100644 test/e2e/webhook/e2e_test.go
 delete mode 100644 test/e2e/webhook/pod/pod.go
 delete mode 100644 test/e2e/webhook/subnet/subnet.go
 delete mode 100644 test/e2e/webhook/vip/vip.go

diff --git a/.golangci.yml b/.golangci.yml
index 4e4ddf5afdc..fef8bd6bff4 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -17,9 +17,7 @@ linters:
     - errcheck
     - gosec
     - govet
-    - perfsprint
     - usestdlibvars
-    - loggercheck
     - whitespace
     - errorlint
diff --git a/go.mod b/go.mod
index daba59d8f95..c7de1ddc0ff 100644
--- a/go.mod
+++ b/go.mod
@@ -46,61 +46,32 @@ require (
 	k8s.io/apimachinery v0.31.1
 	k8s.io/client-go v1.5.2
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubectl v0.30.3
 	k8s.io/kubernetes v1.30.3
-	k8s.io/pod-security-admission v0.30.3
 	k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
-	kubevirt.io/api v1.3.0
 	kubevirt.io/client-go v1.3.0
 	sigs.k8s.io/controller-runtime v0.18.5
 	sigs.k8s.io/network-policy-api v0.1.5
 )

 require (
-	cloud.google.com/go/auth v0.7.3 // indirect
-	cloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect
-	cloud.google.com/go/compute/metadata v0.5.0 // indirect
-	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
-	github.com/GoogleCloudPlatform/k8s-cloud-provider v1.20.0 // indirect
-	github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab // indirect
-	github.com/MakeNowJust/heredoc v1.0.0 // indirect
-	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
-	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/buger/jsonparser v1.1.1 // indirect
 	github.com/cenkalti/hub v1.0.2 // indirect
 	github.com/cenkalti/rpc2 v1.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
-	github.com/chai2010/gettext-go v1.0.2 // indirect
-	github.com/cilium/ebpf v0.17.1 // indirect
-	github.com/container-storage-interface/spec v1.11.0 // indirect
-	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/containerd/cgroups/v3 v3.0.3 // indirect
-	github.com/containerd/containerd/api v1.7.19 // indirect
 	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/log v0.1.0 // indirect
-	github.com/containerd/ttrpc v1.2.5 // indirect
-	github.com/coreos/go-semver v0.3.1 // indirect
-	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cyphar/filepath-securejoin v0.2.5 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect
-	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/go-connections v0.5.0 // indirect
-	github.com/docker/go-units v0.5.0 // indirect
 	github.com/eapache/channels v1.1.0 // indirect
 	github.com/eapache/queue v1.1.0 // indirect
 	github.com/elazarl/goproxy v0.0.0-20230731152917-f99041a5c027 // indirect
-	github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
 	github.com/evanphx/json-patch v5.7.0+incompatible // indirect
-	github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
-	github.com/fatih/camelcase v1.0.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/go-errors/errors v1.5.1 // indirect
 	github.com/go-kit/kit v0.13.0 // indirect
 	github.com/go-kit/log v0.2.1 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
@@ -113,67 +84,43 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.4 // indirect
-	github.com/google/btree v1.1.2 // indirect
-	github.com/google/cadvisor v0.51.0 // indirect
 	github.com/google/cel-go v0.17.8 // indirect
 	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
-	github.com/google/s2a-go v0.1.8 // indirect
-	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
-	github.com/googleapis/gax-go/v2 v2.13.0 // indirect
 	github.com/gopherjs/gopherjs v1.17.2 // indirect
 	github.com/gorilla/websocket v1.5.3 // indirect
-	github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
-	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
 	github.com/hashicorp/go-version v1.7.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
 	github.com/imdario/mergo v0.3.16 // indirect
-	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/josharian/native v1.1.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/juju/errors v1.0.0 // indirect
 	github.com/k-sone/critbitgo v1.4.0 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/karrick/godirwalk v1.17.0 // indirect
 	github.com/kelseyhightower/envconfig v1.4.0 // indirect
 	github.com/klauspost/compress v1.17.9 // indirect
 	github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 // indirect
-	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/libopenstorage/openstorage v1.0.0 // indirect
-	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/magiconair/properties v1.8.7 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 // indirect
 	github.com/mdlayher/packet v1.1.2 // indirect
 	github.com/mdlayher/socket v0.5.1 // indirect
-	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible // indirect
-	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
-	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
-	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
-	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
 	github.com/moul/http2curl v1.0.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0 // indirect
-	github.com/opencontainers/runc v1.1.14 // indirect
-	github.com/opencontainers/runtime-spec v1.2.0 // indirect
-	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/openshift/api v0.0.0-20230621174358-ea40115b9fa6 // indirect
 	github.com/openshift/client-go v3.9.0+incompatible // indirect
 	github.com/openshift/custom-resource-status v1.1.2 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.2 // indirect
-	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 	github.com/projectcalico/go-json v0.0.0-20161128004156-6219dc7339ba // indirect
@@ -183,7 +130,6 @@ require (
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.57.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.6.0 // indirect
 	github.com/sagikazarmark/slog-shim v0.1.0 // indirect
 	github.com/smartystreets/assertions v1.13.0 // indirect
@@ -191,31 +137,21 @@ require (
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.11.0 // indirect
 	github.com/spf13/cast v1.7.0 // indirect
-	github.com/spf13/cobra v1.8.1 // indirect
 	github.com/spf13/viper v1.19.0 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
-	github.com/xlab/treeprint v1.2.0 // indirect
-	go.etcd.io/etcd/api/v3 v3.5.15 // indirect
-	go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect
-	go.etcd.io/etcd/client/v3 v3.5.15 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.54.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
 	go.opentelemetry.io/otel v1.29.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 // indirect
 	go.opentelemetry.io/otel/metric v1.29.0 // indirect
 	go.opentelemetry.io/otel/sdk v1.29.0 // indirect
 	go.opentelemetry.io/otel/trace v1.29.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	go.starlark.net v0.0.0-20231121155337-90ade8b19d09 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.28.0 // indirect
 	golang.org/x/exp v0.0.0-20240904232852-e7e105dedf7e // indirect
 	golang.org/x/net v0.30.0 // indirect
 	golang.org/x/oauth2 v0.23.0 // indirect
@@ -224,40 +160,23 @@ require (
 	golang.org/x/text v0.19.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
-	google.golang.org/api v0.188.0 // indirect
-	google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	gopkg.in/gcfg.v1 v1.2.3 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
-	gopkg.in/warnings.v0 v0.1.2 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	gotest.tools/v3 v3.0.2 // indirect
 	k8s.io/apiextensions-apiserver v0.30.3 // indirect
 	k8s.io/apiserver v0.30.3 // indirect
-	k8s.io/cli-runtime v0.30.3 // indirect
-	k8s.io/cloud-provider v0.30.3 // indirect
-	k8s.io/cluster-bootstrap v0.30.3 // indirect
 	k8s.io/component-base v0.31.0 // indirect
-	k8s.io/component-helpers v0.30.3 // indirect
-	k8s.io/controller-manager v0.30.3 // indirect
-	k8s.io/cri-api v0.30.3 // indirect
-	k8s.io/csi-translation-lib v0.30.3 // indirect
-	k8s.io/dynamic-resource-allocation v0.0.0 // indirect
-	k8s.io/kms v0.30.3 // indirect
 	k8s.io/kube-openapi v0.30.0 // indirect
-	k8s.io/kube-scheduler v0.0.0 // indirect
-	k8s.io/kubelet v0.30.3 // indirect
-	k8s.io/legacy-cloud-providers v0.0.0 // indirect
-	k8s.io/mount-utils v0.0.0 // indirect
+	kubevirt.io/api v1.3.0 // indirect
 	kubevirt.io/containerized-data-importer-api v1.58.1 // indirect
 	kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
-	sigs.k8s.io/kustomize/api v0.14.0 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.14.3 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
 	sigs.k8s.io/yaml v1.4.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 0d018bf0e0a..c675c01a124 100644
--- a/go.sum
+++ b/go.sum
@@ -98,10 +98,6 @@ cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVo
 cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
 cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
 cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
-cloud.google.com/go/auth v0.7.3 h1:98Vr+5jMaCZ5NZk6e/uBgf60phTk/XN84r8QEWB9yjY=
-cloud.google.com/go/auth v0.7.3/go.mod h1:HJtWUx1P5eqjy/f6Iq5KeytNpbAcGolPhOgyop2LlzA=
-cloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=
-cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=
 cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
 cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
 cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
@@ -182,8 +178,6 @@ cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZ
 cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
 cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=
-cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=
 cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
 cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
 cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
@@ -603,24 +597,14 @@ cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcP
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
 git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v1.20.0 h1:YzfIwJKTQrEfI01GNmniOXvbTNb4orp/2FkQR3SOV7Q=
-github.com/GoogleCloudPlatform/k8s-cloud-provider v1.20.0/go.mod h1:udfkkDrxEo+LR3PoOyZI93UA0+yj9tETL2b+oucu1Gc=
-github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI=
-github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA=
 github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
-github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
-github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
 github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
 github.com/Microsoft/hcsshim v0.12.7 h1:MP6R1spmjxTE4EU4J3YsrTxn8CjvN9qwjTKJXldFaRg=
 github.com/Microsoft/hcsshim v0.12.7/go.mod h1:HPbAuJ9BvQYYZbB4yEQcyGIsTP5L4yHKeO9XO149AEM=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
-github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
@@ -635,13 +619,9 @@ github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/g
 github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
 github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
 github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
-github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs=
-github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
-github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bhendo/go-powershell v0.0.0-20190719160123-219e7fb4e41e h1:KCjb01YiNoRaJ5c+SbnPLWjVzU9vqRYHg3e5JcN50nM=
@@ -666,8 +646,6 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
-github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA=
 github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs=
 github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs=
 github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww=
@@ -677,8 +655,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
 github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
-github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0=
-github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -695,54 +671,27 @@ github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08 h1:ox2F0PSMlrAAiAdknSRMDrAr8mfxPCfSZolH+/qQnyQ=
 github.com/cnf/structhash v0.0.0-20201127153200-e1b16c1ebc08/go.mod h1:pCxVEbcm3AMg7ejXyorUXi6HQCzOIBf7zEDVPtw0/U4=
-github.com/container-storage-interface/spec v1.11.0 h1:H/YKTOeUZwHtyPOr9raR+HgFmGluGCklulxDYxSdVNM=
-github.com/container-storage-interface/spec v1.11.0/go.mod h1:DtUvaQszPml1YJfIK7c00mlv6/g4wNMLanLgiUbKFRI=
-github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
 github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
-github.com/containerd/containerd/api v1.7.19 h1:VWbJL+8Ap4Ju2mx9c9qS1uFSB1OVYr5JJrW2yT5vFoA=
-github.com/containerd/containerd/api v1.7.19/go.mod h1:fwGavl3LNwAV5ilJ0sbrABL44AQxmNjDRcwheXDb6Ig=
 github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
 github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
-github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU=
-github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
-github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
-github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
 github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM=
 github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M=
 github.com/containernetworking/plugins v1.5.1 h1:T5ji+LPYjjgW0QM+KyrigZbLsZ8jaX+E5J/EcKOE4gQ=
 github.com/containernetworking/plugins v1.5.1/go.mod h1:MIQfgMayGuHYs0XdNudf31cLLAC+i242hNm6KuDGqCM=
-github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
-github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
-github.com/cyphar/filepath-securejoin v0.2.5 h1:6iR5tXJ/e6tJZzzdMc1km3Sa7RRIVBKAK32O2s7AYfo=
-github.com/cyphar/filepath-securejoin v0.2.5/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
 github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
-github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
 github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
-github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
-github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
-github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k=
 github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
@@ -770,17 +719,11 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
 github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
 github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
-github.com/euank/go-kmsg-parser v2.0.0+incompatible h1:cHD53+PLQuuQyLZeriD1V/esuG4MuU0Pjs5y6iknohY=
-github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw=
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
 github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
 github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
-github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
-github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
-github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
-github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/set v0.2.1 h1:nn2CaJyknWE/6txyUDGwysr3G5QC6xWB/PtVjPBbeaA=
 github.com/fatih/set v0.2.1/go.mod h1:+RKtMCH+favT2+3YecHGxcc0b4KyVWA1QWWJUs4E0CI=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -795,8 +738,6 @@ github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nos
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
 github.com/fxamacker/cbor/v2 v2.6.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
-github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
 github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
 github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
 github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
@@ -815,7 +756,6 @@ github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2C
 github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -845,8 +785,6 @@ github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+Gr
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
 github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
 github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
-github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -857,13 +795,10 @@ github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u1
 github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
 github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
 github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
-github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@@ -908,10 +843,6 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cadvisor v0.51.0 h1:BspqSPdZoLKrnvuZNOvM/KiJ/A+RdixwagN20n+2H8k=
-github.com/google/cadvisor v0.51.0/go.mod h1:czGE/c/P/i0QFpVNKTFrIEzord9Y10YfpwuaSWXELc0=
 github.com/google/cel-go v0.17.8 h1:j9m730pMZt1Fc4oKhCLUHfjj6527LuhYcYw0Rl8gqto=
 github.com/google/cel-go v0.17.8/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulNQzhwhY=
 github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
@@ -967,10 +898,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
 github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
 github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
-github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
-github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -980,8 +907,6 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY
 github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
 github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
 github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -996,8 +921,6 @@ github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38
 github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
 github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
 github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
-github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
-github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
 github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
 github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -1008,13 +931,6 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
 github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
 github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
-github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
@@ -1034,17 +950,11 @@ github.com/ianlancetaylor/demangle v0.0.0-20240312041847-bd984b5ce465/go.mod h1:
 github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
-github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
-github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
 github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
-github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
-github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@@ -1063,8 +973,6 @@ github.com/k8snetworkplumbingwg/sriovnet v1.2.0 h1:6ELfAxCB1dvosGUy3DVRmfH+HWTzm
 github.com/k8snetworkplumbingwg/sriovnet v1.2.0/go.mod h1:jyWzGe6ZtYiPq6ih6aXCOy6mZ49Y9mNyBOLBBXnli+k=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
-github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
-github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
 github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
@@ -1105,10 +1013,6 @@ github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0/go.mod h1:YBCo4D
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs=
-github.com/libopenstorage/openstorage v1.0.0 h1:GLPam7/0mpdP8ZZtKjbfcXJBTIA/T1O6CBErVEFEyIM=
-github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc=
-github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
-github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
 github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
 github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
@@ -1126,8 +1030,6 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
 github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
-github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
-github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
 github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
 github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY=
 github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4=
@@ -1136,32 +1038,18 @@ github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos
 github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
 github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
 github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
-github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
-github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
-github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
-github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
 github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
 github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
 github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4=
-github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
-github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
-github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
-github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
 github.com/moul/http2curl v1.0.0 h1:dRMWoAtb+ePxMlLkrCbAqh4TlPHXvoGUSQ323/9Zahs=
 github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
 github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -1227,14 +1115,6 @@ github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
 github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
-github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
-github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
-github.com/opencontainers/runc v1.1.14 h1:rgSuzbmgz5DUJjeSnw337TxDbRuqjs6iqQck/2weR6w=
-github.com/opencontainers/runc v1.1.14/go.mod h1:E4C2z+7BxR7GHXp0hAY53mek+x49X1LjPNeMTfRGvOA=
-github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
-github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
-github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/openshift/api v0.0.0-20230621174358-ea40115b9fa6 h1:VqW2TH3h1CjkatlshBy4gRLQEFfolZ1w9pLjWdT5wYE=
 github.com/openshift/api v0.0.0-20230621174358-ea40115b9fa6/go.mod h1:4VWG+W22wrB4HfBL88P40DxLEpSOaiBVxUnfalfJo9k=
 github.com/openshift/client-go v0.0.1 h1:zJ9NsS9rwBtYkYzLCUECkdmrM6jPit3W7Q0+Pxf5gd4=
@@ -1248,7 +1128,6 @@ github.com/parnurzeal/gorequest v0.3.0 h1:SoFyqCDC9COr1xuS6VA8fC8RU7XyrJZN2ona1k
 github.com/parnurzeal/gorequest v0.3.0/go.mod h1:3Kh2QUMJoqw3icWAecsyzkpY7UzRfDhbRdTjtNwNiUE=
 github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
 github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
-github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
 github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
@@ -1293,8 +1172,6 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f
 github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
 github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
 github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
-github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
 github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
 github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
@@ -1303,10 +1180,6 @@ github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6g
 github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
 github.com/scylladb/go-set v1.0.2 h1:SkvlMCKhP0wyyct6j+0IHJkBkSZL+TDzZ4E7f7BCcRE=
 github.com/scylladb/go-set v1.0.2/go.mod h1:DkpGd78rljTxKAnTDPFqXSGxvETQnJyuSOQwsHycqfs=
-github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
-github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
@@ -1314,8 +1187,6 @@ github.com/smartystreets/assertions v1.13.0 h1:Dx1kYM01xsSqKPno3aqLnrwac2LetPvN2
 github.com/smartystreets/assertions v1.13.0/go.mod h1:wDmR7qL282YbGsPy6H/yAsesrxfxaaSlJazyFLYVFx8=
 github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@@ -1326,8 +1197,7 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
 github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
 github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w=
 github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
-github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
-github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
@@ -1353,17 +1223,11 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT
 github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
 github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
 github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
 github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
 github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
-github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
 github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1373,22 +1237,6 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
 github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
-go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0=
-go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ=
-go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
-go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU=
-go.etcd.io/etcd/client/v2 v2.305.12 h1:0m4ovXYo1CHaA/Mp3X/Fak5sRNIWf01wk/X1/G3sGKI=
-go.etcd.io/etcd/client/v2 v2.305.12/go.mod h1:aQ/yhsxMu+Oht1FOupSr60oBvcS9cKXHrzBpDsPTf9E=
-go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4=
-go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU=
-go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM=
-go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs=
-go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA=
-go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc=
-go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg=
-go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo=
 go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
 go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
 go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -1398,22 +1246,14 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.54.0 h1:PwK3qu85W2/OZsZ+7KVSSKj6fAy9Ewf0E2N34UZNUjU=
-go.opentelemetry.io/contrib/instrumentation/github.com/emicklei/go-restful/otelrestful v0.54.0/go.mod h1:fgH0JNwCMNwGsQOz4nlf6j5OhJwpE/Bv1hpQM6sjYOU=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
-go.opentelemetry.io/contrib/propagators/b3 v1.29.0 h1:hNjyoRsAACnhoOLWupItUjABzeYmX3GTTZLzwJluJlk=
-go.opentelemetry.io/contrib/propagators/b3 v1.29.0/go.mod h1:E76MTitU1Niwo5NSN+mVxkyLu4h4h7Dp/yh38F2WuIU=
 go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
 go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0 h1:dIIDULZJpgdiHz5tXrTgKIMLkus6jEFa7x5SOKcyR7E=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.29.0/go.mod h1:jlRVBe7+Z1wyxFSUs48L6OBQZ5JwH2Hg/Vbl+t9rAgI=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0 h1:nSiV3s7wiCam610XcLbYOmMfJxB9gO4uK3Xgv5gmTgg=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.29.0/go.mod h1:hKn/e/Nmd19/x1gvIHwtOwVWM+VhuITSWip3JUDghj0=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0 h1:JAv0Jwtl01UFiyWZEMiJZBiTlv5A50zNs8lsthXqIio=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.29.0/go.mod h1:QNKLmUEAq2QUbPQUfvw4fmv0bgbK7UlOSFCnXyfvSNc=
 go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
 go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
 go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
@@ -1425,8 +1265,6 @@ go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI
 go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
-go.starlark.net v0.0.0-20231121155337-90ade8b19d09 h1:hzy3LFnSN8kuQK8h9tHl4ndF6UruMj47OqwqsS+/Ai4=
-go.starlark.net v0.0.0-20231121155337-90ade8b19d09/go.mod h1:LcLNIzVOMp4oV+uusnpk+VU+SzXaJakUuBjoCSWH5dM=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -1455,8 +1293,6 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
 golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1831,6 +1667,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1956,7 +1793,6 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= -google.golang.org/api v0.89.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= @@ -1977,8 +1813,6 @@ google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjY google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms= google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4= google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw= -google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw= -google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2121,8 +1955,6 @@ google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOl google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY= google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64= -google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 h1:CT2Thj5AuPV9phrYMtzX11k+XkzMGfRAet42PmoTATM= -google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988/go.mod h1:7uvplUBj4RjHAxIZ//98LzOvrQ04JBkaixRmCMI29hc= google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8= google.golang.org/genproto/googleapis/api 
v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig= @@ -2209,8 +2041,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntN gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= @@ -2219,12 +2049,8 @@ gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.1.2 h1:WlsNIV3g9BAHX6f6HbCmXqwphe gopkg.in/k8snetworkplumbingwg/multus-cni.v4 v4.1.2/go.mod h1:tDBw1dugyqB3rHi8EbXUQusalu/VWQg6gbVlIgCpHa4= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -2255,60 +2081,23 @@ k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= k8s.io/apiserver v0.30.3 h1:QZJndA9k2MjFqpnyYv/PH+9PE0SHhx3hBho4X0vE65g= k8s.io/apiserver v0.30.3/go.mod h1:6Oa88y1CZqnzetd2JdepO0UXzQX4ZnOekx2/PtEjrOg= -k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= -k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= -k8s.io/cloud-provider v0.30.3 h1:SNWZmllTymOTzIPJuhtZH6il/qVi75dQARRQAm9k6VY= -k8s.io/cloud-provider v0.30.3/go.mod h1:Ax0AVdHnM7tMYnJH1Ycy4SMBD98+4zA+tboUR9eYsY8= -k8s.io/cluster-bootstrap v0.30.3 h1:MgxyxMkpaC6mu0BKWJ8985XCOnKU+eH3Iy+biwtDXRk= -k8s.io/cluster-bootstrap v0.30.3/go.mod h1:h8BoLDfdD7XEEIXy7Bx9FcMzxHwz29jsYYi34bM5DKU= k8s.io/code-generator v0.30.3/go.mod h1:PFgBiv+miFV7TZYp+RXgROkhA+sWYZ+mtpbMLofMke8= k8s.io/component-base v0.30.3 h1:Ci0UqKWf4oiwy8hr1+E3dsnliKnkMLZMVbWzeorlk7s= k8s.io/component-base v0.30.3/go.mod h1:C1SshT3rGPCuNtBs14RmVD2xW0EhRSeLvBh7AGk1quA= -k8s.io/component-helpers v0.30.3 h1:KPc8l0eGx9Wg2OcKc58k9ozNcVcOInAi3NGiuS2xJ/c= -k8s.io/component-helpers v0.30.3/go.mod h1:VOQ7g3q+YbKWwKeACG2BwPv4ftaN8jXYJ5U3xpzuYAE= 
-k8s.io/controller-manager v0.30.3 h1:QRFGkWWD5gi/KCSU0qxyUoZRbt+BKgiCUXiTD1RO95w= -k8s.io/controller-manager v0.30.3/go.mod h1:F95rjHCOH2WwV9XlVxRo71CtddKLhF3FzE+s1lc7E/0= -k8s.io/cri-api v0.30.3 h1:o7AAGb3645Ik44WkHI0eqUc7JbQVmstlINLlLAtU/rI= -k8s.io/cri-api v0.30.3/go.mod h1://4/umPJSW1ISNSNng4OwjpkvswJOQwU8rnkvO8P+xg= -k8s.io/csi-translation-lib v0.30.3 h1:wBaPWnOi14/vANRIrp8pmbdx/Pgz2QRcroH7wkodezc= -k8s.io/csi-translation-lib v0.30.3/go.mod h1:3AizNZbDttVDH1RO0x1yGEQP74e9Xbfb60IBP1oWO1o= -k8s.io/dynamic-resource-allocation v0.30.3 h1:49aLgEhknKF8gPVhsquJ3ylOnfC8ddxnqVP6y3T+hkM= -k8s.io/dynamic-resource-allocation v0.30.3/go.mod h1:Dj7OzA3pYT/OfN9PvuYt9CH5e5KcjKBRAik8XeG0nB8= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.30.3 h1:NLg+oN45S2Y3U0WiLRzbS61AY/XrS5JBMZp531Z+Pho= -k8s.io/kms v0.30.3/go.mod h1:GrMurD0qk3G4yNgGcsCEmepqf9KyyIrTXYR2lyUOJC4= -k8s.io/kube-aggregator v0.30.3 h1:hy5zfQ7p6BuJgc/XtGp3GBh2MPfOj6b1n3raKKMHOQE= -k8s.io/kube-aggregator v0.30.3/go.mod h1:2SP0IckvQoOwwZN8lmtWUnTZTgIpwOWvidWtxyqLwuk= k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM= k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro= -k8s.io/kube-scheduler v0.30.3 h1:Fn2sX+tOvOMkRG6W2kntNAQStIZlr3/X6DfpMcy/rik= -k8s.io/kube-scheduler v0.30.3/go.mod h1:wzEWmB9+4/e+x96n1d1BHuETHHrOxb22ESAtVS/fJts= -k8s.io/kubectl v0.30.3 h1:YIBBvMdTW0xcDpmrOBzcpUVsn+zOgjMYIu7kAq+yqiI= -k8s.io/kubectl v0.30.3/go.mod h1:IcR0I9RN2+zzTRUa1BzZCm4oM0NLOawE6RzlDvd1Fpo= -k8s.io/kubelet v0.30.3 h1:KvGWDdhzD0vEyDyGTCjsDc8D+0+lwRMw3fJbfQgF7ys= -k8s.io/kubelet v0.30.3/go.mod h1:D9or45Vkzcqg55CEiqZ8dVbwP3Ksj7DruEVRS9oq3Ys= k8s.io/kubernetes v1.30.3 h1:A0qoXI1YQNzrQZiff33y5zWxYHFT/HeZRK98/sRDJI0= k8s.io/kubernetes v1.30.3/go.mod h1:yPbIk3MhmhGigX62FLJm+CphNtjxqCvAIFQXup6RKS0= -k8s.io/legacy-cloud-providers v0.30.3 h1:6C50kKmsdKNTsQqfy8V6MTbQKlEkR1oJoeh+WrilM4w= -k8s.io/legacy-cloud-providers v0.30.3/go.mod h1:VATC0a8MFqrTeVBCSYnMPhMP83bZA7vaMbE7eA8xSa8= -k8s.io/metrics v0.30.3 h1:gKCpte5zykrOmQhZ8qmsxyJslMdiLN+sqbBfIWNpbGM= -k8s.io/metrics v0.30.3/go.mod h1:W06L2nXRhOwPkFYDJYWdEIS3u6JcJy3ebIPYbndRs6A= -k8s.io/mount-utils v0.30.3 h1:8Z3wSW5+GSvGNtlDhtoZrBCKLMIf5z/9tf8pie+G06s= -k8s.io/mount-utils v0.30.3/go.mod h1:9sCVmwGLcV1MPvbZ+rToMDnl1QcGozy+jBPd0MsQLIo= -k8s.io/pod-security-admission v0.30.3 h1:UDGZWR3ry/XrN/Ki/w7qrp49OwgQsKyh+6xWbexvJi8= -k8s.io/pod-security-admission v0.30.3/go.mod h1:T1EQSOLl9YyDMnXNJfsq2jeci6uoymY0mrRkkKihd98= -k8s.io/sample-apiserver v0.30.3 h1:SGlc1FvY+5CGolD0Qn1iGuhbhBWMUru/kcjQ9ki2iEs= -k8s.io/sample-apiserver v0.30.3/go.mod h1:P4g1Jw2lq2wtCiibqVX3KIRAfXtHpw6pOD/dzwmVG/w= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils 
v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= @@ -2362,10 +2151,6 @@ sigs.k8s.io/controller-runtime v0.18.5 h1:nTHio/W+Q4aBlQMgbnC5hZb4IjIidyrizMai9P sigs.k8s.io/controller-runtime v0.18.5/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.14.0 h1:6+QLmXXA8X4eDM7ejeaNUyruA1DDB3PVIjbpVhDOJRA= -sigs.k8s.io/kustomize/api v0.14.0/go.mod h1:vmOXlC8BcmcUJQjiceUbcyQ75JBP6eg8sgoyzc+eLpQ= -sigs.k8s.io/kustomize/kyaml v0.14.3 h1:WpabVAKZe2YEp/irTSHwD6bfjwZnTtSDewd2BVJGMZs= -sigs.k8s.io/kustomize/kyaml v0.14.3/go.mod h1:npvh9epWysfQ689Rtt/U+dpOJDTBn8kUnF1O6VzvmZA= sigs.k8s.io/network-policy-api v0.1.5 h1:xyS7VAaM9EfyB428oFk7WjWaCK6B129i+ILUF4C8l6E= sigs.k8s.io/network-policy-api v0.1.5/go.mod h1:D7Nkr43VLNd7iYryemnj8qf0N/WjBzTZDxYA+g4u1/Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= diff --git a/test/e2e/framework/cni.go b/test/e2e/framework/cni.go deleted file mode 100644 index 4e62b1c6535..00000000000 --- a/test/e2e/framework/cni.go +++ /dev/null @@ -1,75 +0,0 @@ -package framework - -import ( - "encoding/json" - - "github.com/containernetworking/cni/pkg/types" - nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/netconf" - "github.com/kubeovn/kube-ovn/pkg/request" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -const CNIVersion = "0.3.1" - -// https://github.com/containernetworking/plugins/blob/main/plugins/main/macvlan/macvlan.go#L37 -type MacvlanNetConf struct { - netconf.NetConf - Master string `json:"master"` - Mode string `json:"mode"` - MTU int `json:"mtu"` - Mac string `json:"mac,omitempty"` - LinkContNs bool `json:"linkInContainer,omitempty"` - - RuntimeConfig struct { - Mac string `json:"mac,omitempty"` - } `json:"runtimeConfig,omitempty"` -} - -func MakeMacvlanNetworkAttachmentDefinition(name, namespace, master, mode, provider string, routes []request.Route) *nadv1.NetworkAttachmentDefinition { - ginkgo.GinkgoHelper() - - config := &MacvlanNetConf{ - NetConf: netconf.NetConf{ - NetConf: types.NetConf{ - CNIVersion: CNIVersion, - Type: "macvlan", - }, - IPAM: &netconf.IPAMConf{ - Type: util.CniTypeName, - ServerSocket: "/run/openvswitch/kube-ovn-daemon.sock", - Provider: provider, - Routes: routes, - }, - }, - Master: master, - Mode: mode, - LinkContNs: true, - } - buf, err := json.MarshalIndent(config, "", " ") - framework.ExpectNoError(err) - - return MakeNetworkAttachmentDefinition(name, namespace, string(buf)) -} - -func MakeOVNNetworkAttachmentDefinition(name, namespace, provider string, routes []request.Route) *nadv1.NetworkAttachmentDefinition { - ginkgo.GinkgoHelper() - - config := &netconf.NetConf{ - NetConf: types.NetConf{ - CNIVersion: CNIVersion, - Type: util.CniTypeName, - }, - ServerSocket: "/run/openvswitch/kube-ovn-daemon.sock", - Provider: provider, - Routes: routes, - } - buf, err := json.MarshalIndent(config, "", " ") - framework.ExpectNoError(err) - - return MakeNetworkAttachmentDefinition(name, namespace, string(buf)) -} diff --git a/test/e2e/framework/daemonset.go 
b/test/e2e/framework/daemonset.go deleted file mode 100644 index 642297a0ba7..00000000000 --- a/test/e2e/framework/daemonset.go +++ /dev/null @@ -1,146 +0,0 @@ -package framework - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1apps "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubectl/pkg/polymorphichelpers" - - "github.com/onsi/ginkgo/v2" -) - -type DaemonSetClient struct { - f *Framework - v1apps.DaemonSetInterface - namespace string -} - -func (f *Framework) DaemonSetClient() *DaemonSetClient { - return f.DaemonSetClientNS(f.Namespace.Name) -} - -func (f *Framework) DaemonSetClientNS(namespace string) *DaemonSetClient { - return &DaemonSetClient{ - f: f, - DaemonSetInterface: f.ClientSet.AppsV1().DaemonSets(namespace), - namespace: namespace, - } -} - -func (c *DaemonSetClient) Get(name string) *appsv1.DaemonSet { - ginkgo.GinkgoHelper() - ds, err := c.DaemonSetInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return ds -} - -func (c *DaemonSetClient) GetPods(ds *appsv1.DaemonSet) (*corev1.PodList, error) { - podSelector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) - if err != nil { - return nil, err - } - podListOptions := metav1.ListOptions{LabelSelector: podSelector.String()} - allPods, err := c.f.ClientSet.CoreV1().Pods(ds.Namespace).List(context.TODO(), podListOptions) - if err != nil { - return nil, err - } - - ownedPods := &corev1.PodList{Items: make([]corev1.Pod, 0, len(allPods.Items))} - for i, pod := range allPods.Items { - controllerRef := metav1.GetControllerOf(&allPods.Items[i]) - if controllerRef != nil && controllerRef.UID == ds.UID { - ownedPods.Items = append(ownedPods.Items, pod) - } - } - - return ownedPods, nil -} - -func (c *DaemonSetClient) GetPodOnNode(ds *appsv1.DaemonSet, node string) (*corev1.Pod, error) { - pods, err := c.GetPods(ds) - if err != nil { - return nil, err - } - for _, pod := range pods.Items { - if pod.Spec.NodeName == node { - return pod.DeepCopy(), nil - } - } - - return nil, fmt.Errorf("pod for daemonset %s/%s on node %s not found", ds.Namespace, ds.Name, node) -} - -func (c *DaemonSetClient) Patch(daemonset *appsv1.DaemonSet) *appsv1.DaemonSet { - ginkgo.GinkgoHelper() - - modifiedBytes, err := json.Marshal(daemonset) - if err != nil { - Failf("failed to marshal modified DaemonSet: %v", err) - } - ExpectNoError(err) - var patchedDaemonSet *appsv1.DaemonSet - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - daemonSet, err := c.DaemonSetInterface.Patch(ctx, daemonset.Name, types.MergePatchType, modifiedBytes, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch daemonset %s/%s", daemonset.Namespace, daemonset.Name) - } - patchedDaemonSet = daemonSet - return true, nil - }) - if err == nil { - return patchedDaemonSet.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch daemonset %s/%s", daemonset.Namespace, daemonset.Name) - } - Failf("error occurred while retrying to patch daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err) - - return nil -} - -func (c *DaemonSetClient) PatchSync(modifiedDaemonset 
*appsv1.DaemonSet) *appsv1.DaemonSet { - ginkgo.GinkgoHelper() - daemonSet := c.Patch(modifiedDaemonset) - return c.RolloutStatus(daemonSet.Name) -} - -func (c *DaemonSetClient) RolloutStatus(name string) *appsv1.DaemonSet { - ginkgo.GinkgoHelper() - - var daemonSet *appsv1.DaemonSet - WaitUntil(2*time.Second, timeout, func(_ context.Context) (bool, error) { - var err error - daemonSet = c.Get(name) - unstructured := &unstructured.Unstructured{} - if unstructured.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(daemonSet); err != nil { - return false, err - } - - dsv := &polymorphichelpers.DaemonSetStatusViewer{} - msg, done, err := dsv.Status(unstructured, 0) - if err != nil { - return false, err - } - if done { - return true, nil - } - - Logf(strings.TrimSpace(msg)) - return false, nil - }, "") - - return daemonSet -} diff --git a/test/e2e/framework/deployment.go b/test/e2e/framework/deployment.go deleted file mode 100644 index 1a04b0c786f..00000000000 --- a/test/e2e/framework/deployment.go +++ /dev/null @@ -1,232 +0,0 @@ -package framework - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1apps "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubectl/pkg/polymorphichelpers" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/deployment" - testutils "k8s.io/kubernetes/test/utils" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -type DeploymentClient struct { - f *Framework - v1apps.DeploymentInterface - namespace string -} - -func (f *Framework) DeploymentClient() *DeploymentClient { - return f.DeploymentClientNS(f.Namespace.Name) -} - -func (f *Framework) DeploymentClientNS(namespace string) *DeploymentClient { - return &DeploymentClient{ - f: f, - DeploymentInterface: f.ClientSet.AppsV1().Deployments(namespace), - namespace: namespace, - } -} - -func (c *DeploymentClient) Get(name string) *appsv1.Deployment { - ginkgo.GinkgoHelper() - deploy, err := c.DeploymentInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return deploy -} - -func (c *DeploymentClient) GetPods(deploy *appsv1.Deployment) (*corev1.PodList, error) { - return deployment.GetPodsForDeployment(context.Background(), c.f.ClientSet, deploy) -} - -func (c *DeploymentClient) GetAllPods(deploy *appsv1.Deployment) (*corev1.PodList, error) { - podSelector, err := metav1.LabelSelectorAsSelector(deploy.Spec.Selector) - if err != nil { - return nil, err - } - podListOptions := metav1.ListOptions{LabelSelector: podSelector.String()} - return c.f.ClientSet.CoreV1().Pods(deploy.Namespace).List(context.TODO(), podListOptions) -} - -// Create creates a new deployment according to the framework specifications -func (c *DeploymentClient) Create(deploy *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - d, err := c.DeploymentInterface.Create(context.TODO(), deploy, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating deployment") - return d.DeepCopy() -} - -// CreateSync creates a new deployment according to the framework specifications, and waits for it to complete. 
-func (c *DeploymentClient) CreateSync(deploy *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - - d := c.Create(deploy) - err := c.WaitToComplete(d) - ExpectNoError(err, "deployment failed to complete") - // Get the newest deployment - return c.Get(d.Name).DeepCopy() -} - -func (c *DeploymentClient) RolloutStatus(name string) *appsv1.Deployment { - ginkgo.GinkgoHelper() - - var deploy *appsv1.Deployment - WaitUntil(2*time.Second, timeout, func(_ context.Context) (bool, error) { - var err error - deploy = c.Get(name) - unstructured := &unstructured.Unstructured{} - if unstructured.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(deploy); err != nil { - return false, err - } - - dsv := &polymorphichelpers.DeploymentStatusViewer{} - msg, done, err := dsv.Status(unstructured, 0) - if err != nil { - return false, err - } - if done { - return true, nil - } - - Logf(strings.TrimSpace(msg)) - return false, nil - }, "") - - return deploy -} - -func (c *DeploymentClient) Patch(original, modified *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedDeploy *appsv1.Deployment - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - deploy, err := c.DeploymentInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch deployment %s/%s", original.Namespace, original.Name) - } - patchedDeploy = deploy - return true, nil - }) - if err == nil { - return patchedDeploy.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch deployment %s/%s", original.Namespace, original.Name) - } - Failf("error occurred while retrying to patch deployment %s/%s: %v", original.Namespace, original.Name, err) - - return nil -} - -func (c *DeploymentClient) PatchSync(original, modified *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - deploy := c.Patch(original, modified) - return c.RolloutStatus(deploy.Name) -} - -// Restart restarts the deployment as kubectl does -func (c *DeploymentClient) Restart(deploy *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - - buf, err := polymorphichelpers.ObjectRestarterFn(deploy) - ExpectNoError(err) - - m := make(map[string]interface{}) - err = json.Unmarshal(buf, &m) - ExpectNoError(err) - - deploy = new(appsv1.Deployment) - err = runtime.DefaultUnstructuredConverter.FromUnstructured(m, deploy) - ExpectNoError(err) - - deploy, err = c.DeploymentInterface.Update(context.TODO(), deploy, metav1.UpdateOptions{}) - ExpectNoError(err) - - return deploy.DeepCopy() -} - -// RestartSync restarts the deployment and waits for it to be ready -func (c *DeploymentClient) RestartSync(deploy *appsv1.Deployment) *appsv1.Deployment { - ginkgo.GinkgoHelper() - _ = c.Restart(deploy) - return c.RolloutStatus(deploy.Name) -} - -func (c *DeploymentClient) SetScale(deployment string, replicas int32) { - ginkgo.GinkgoHelper() - - scale, err := c.GetScale(context.Background(), deployment, metav1.GetOptions{}) - framework.ExpectNoError(err) - if scale.Spec.Replicas == replicas { - Logf("replicas of deployment %s/%s have already been set to %d", c.namespace, deployment, replicas) - return - } - - scale.Spec.Replicas = replicas - _, err = c.UpdateScale(context.Background(), deployment, scale,
metav1.UpdateOptions{}) - framework.ExpectNoError(err) -} - -// Delete deletes a deployment if the deployment exists -func (c *DeploymentClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.DeploymentInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete deployment %q: %v", name, err) - } -} - -// DeleteSync deletes the deployment and waits for the deployment to disappear for `timeout`. -// If the deployment doesn't disappear before the timeout, it will fail the test. -func (c *DeploymentClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for deployment %q to disappear", name) -} - -func (c *DeploymentClient) WaitToComplete(deploy *appsv1.Deployment) error { - return testutils.WaitForDeploymentComplete(c.f.ClientSet, deploy, Logf, 2*time.Second, 2*time.Minute) -} - -// WaitToDisappear waits the given timeout duration for the specified deployment to disappear. -func (c *DeploymentClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*appsv1.Deployment, error) { - deploy, err := c.DeploymentInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return deploy, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected deployment %s to not be found: %w", name, err) - } - return nil -} - -func MakeDeployment(name string, replicas int32, podLabels, podAnnotations map[string]string, containerName, image string, strategyType appsv1.DeploymentStrategyType) *appsv1.Deployment { - deploy := deployment.NewDeployment(name, replicas, podLabels, containerName, image, strategyType) - deploy.Spec.Template.Annotations = podAnnotations - return deploy -} diff --git a/test/e2e/framework/docker/container.go b/test/e2e/framework/docker/container.go deleted file mode 100644 index 2e3f280676e..00000000000 --- a/test/e2e/framework/docker/container.go +++ /dev/null @@ -1,87 +0,0 @@ -package docker - -import ( - "context" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - dockerfilters "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" -) - -func ContainerList(filters map[string][]string) ([]types.Container, error) { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, err - } - defer cli.Close() - - f := dockerfilters.NewArgs() - for k, v := range filters { - for _, v1 := range v { - f.Add(k, v1) - } - } - return cli.ContainerList(context.Background(), container.ListOptions{All: true, Filters: f}) -} - -func ContainerCreate(name, image, networkName string, cmd []string) (*types.ContainerJSON, error) { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, err - } - defer cli.Close() - - containerConfig := &container.Config{ - Image: image, - Cmd: cmd, - Tty: false, - } - networkConfig := &network.NetworkingConfig{ - EndpointsConfig: map[string]*network.EndpointSettings{ - networkName: new(network.EndpointSettings), - }, - } - - resp, err := cli.ContainerCreate(context.Background(), containerConfig, nil, networkConfig, nil, name) - if err != nil 
{ - return nil, err - } - - if err = cli.ContainerStart(context.Background(), resp.ID, container.StartOptions{}); err != nil { - return nil, err - } - - info, err := cli.ContainerInspect(context.Background(), resp.ID) - if err != nil { - return nil, err - } - - return &info, nil -} - -func ContainerInspect(id string) (*types.ContainerJSON, error) { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, err - } - defer cli.Close() - - result, err := cli.ContainerInspect(context.Background(), id) - if err != nil { - return nil, err - } - - return &result, nil -} - -func ContainerRemove(id string) error { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err - } - defer cli.Close() - - return cli.ContainerRemove(context.Background(), id, container.RemoveOptions{Force: true}) -} diff --git a/test/e2e/framework/docker/exec.go b/test/e2e/framework/docker/exec.go deleted file mode 100644 index 3c11391e8dc..00000000000 --- a/test/e2e/framework/docker/exec.go +++ /dev/null @@ -1,71 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/client" - "github.com/docker/docker/pkg/stdcopy" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -type ErrNonZeroExitCode struct { - cmd string - code int -} - -func (e ErrNonZeroExitCode) Error() string { - return fmt.Sprintf("command %q exited with code %d", e.cmd, e.code) -} - -func Exec(id string, env []string, cmd ...string) (stdout, stderr []byte, err error) { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, nil, err - } - defer cli.Close() - - framework.Logf("Executing command %q in container %s", strings.Join(cmd, " "), id) - config := container.ExecOptions{ - Privileged: true, - AttachStderr: true, - AttachStdout: true, - Env: env, - Cmd: cmd, - } - createResp, err := cli.ContainerExecCreate(context.Background(), id, config) - if err != nil { - return nil, nil, err - } - - attachResp, err := cli.ContainerExecAttach(context.Background(), createResp.ID, container.ExecStartOptions{}) - if err != nil { - return nil, nil, err - } - defer attachResp.Close() - - var outBuf, errBuf bytes.Buffer - if _, err = stdcopy.StdCopy(&outBuf, &errBuf, attachResp.Reader); err != nil { - return nil, nil, err - } - - inspectResp, err := cli.ContainerExecInspect(context.Background(), createResp.ID) - if err != nil { - return nil, nil, err - } - - if inspectResp.ExitCode != 0 { - framework.Logf("command exited with code %d", inspectResp.ExitCode) - err = ErrNonZeroExitCode{cmd: strings.Join(cmd, " "), code: inspectResp.ExitCode} - } - - stdout, stderr = outBuf.Bytes(), errBuf.Bytes() - framework.Logf("stdout: %s", string(stdout)) - framework.Logf("stderr: %s", string(stderr)) - - return -} diff --git a/test/e2e/framework/docker/network.go b/test/e2e/framework/docker/network.go deleted file mode 100644 index 8706a970116..00000000000 --- a/test/e2e/framework/docker/network.go +++ /dev/null @@ -1,150 +0,0 @@ -package docker - -import ( - "context" - "crypto/sha1" - "encoding/binary" - "fmt" - "net" - "strconv" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/client" - "k8s.io/utils/ptr" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -const MTU = 1500 - -// 
https://github.com/kubernetes-sigs/kind/tree/main/pkg/cluster/internal/providers/docker/network.go#L313 -// generateULASubnetFromName generates an IPv6 subnet based on the -// name and Nth probing attempt -func generateULASubnetFromName(name string, attempt int32) string { - ip := make([]byte, 16) - ip[0] = 0xfc - ip[1] = 0x00 - h := sha1.New() - _, _ = h.Write([]byte(name)) - _ = binary.Write(h, binary.LittleEndian, attempt) - bs := h.Sum(nil) - for i := 2; i < 8; i++ { - ip[i] = bs[i] - } - subnet := &net.IPNet{ - IP: net.IP(ip), - Mask: net.CIDRMask(64, 128), - } - return subnet.String() -} - -func getNetwork(name string, ignoreNotFound bool) (*network.Inspect, error) { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, err - } - defer cli.Close() - - f := filters.NewArgs() - f.Add("name", name) - networks, err := cli.NetworkList(context.Background(), network.ListOptions{Filters: f}) - if err != nil { - return nil, err - } - - if len(networks) == 0 { - if !ignoreNotFound { - return nil, fmt.Errorf("network %s does not exist", name) - } - return nil, nil - } - - info, err := cli.NetworkInspect(context.Background(), networks[0].ID, network.InspectOptions{}) - if err != nil { - return nil, err - } - return &info, nil -} - -func NetworkInspect(name string) (*network.Inspect, error) { - return getNetwork(name, false) -} - -func NetworkCreate(name string, ipv6, skipIfExists bool) (*network.Inspect, error) { - if skipIfExists { - network, err := getNetwork(name, true) - if err != nil { - return nil, err - } - if network != nil { - return network, nil - } - } - - options := network.CreateOptions{ - Driver: "bridge", - Attachable: true, - IPAM: &network.IPAM{ - Driver: "default", - }, - Options: map[string]string{ - "com.docker.network.bridge.enable_ip_masquerade": "true", - "com.docker.network.driver.mtu": strconv.Itoa(MTU), - }, - } - if ipv6 { - options.EnableIPv6 = ptr.To(true) - subnet := generateULASubnetFromName(name, 0) - gateway, err := util.FirstIP(subnet) - if err != nil { - return nil, err - } - config := network.IPAMConfig{ - Subnet: subnet, - Gateway: gateway, - } - options.IPAM.Config = append(options.IPAM.Config, config) - } - - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return nil, err - } - defer cli.Close() - - if _, err = cli.NetworkCreate(context.Background(), name, options); err != nil { - return nil, err - } - - return getNetwork(name, false) -} - -func NetworkConnect(networkID, containerID string) error { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err - } - defer cli.Close() - - return cli.NetworkConnect(context.Background(), networkID, containerID, nil) -} - -func NetworkDisconnect(networkID, containerID string) error { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err - } - defer cli.Close() - - return cli.NetworkDisconnect(context.Background(), networkID, containerID, false) -} - -func NetworkRemove(networkID string) error { - cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) - if err != nil { - return err - } - defer cli.Close() - return cli.NetworkRemove(context.Background(), networkID) -} diff --git a/test/e2e/framework/endpoints.go b/test/e2e/framework/endpoints.go deleted file mode 100644 index af2dca93a16..00000000000 ---
a/test/e2e/framework/endpoints.go +++ /dev/null @@ -1,170 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// EndpointsClient is a struct for the endpoints client. -type EndpointsClient struct { - f *Framework - v1core.EndpointsInterface - namespace string -} - -func (f *Framework) EndpointClient() *EndpointsClient { - return f.EndpointsClientNS(f.Namespace.Name) -} - -func (f *Framework) EndpointsClientNS(namespace string) *EndpointsClient { - return &EndpointsClient{ - f: f, - EndpointsInterface: f.ClientSet.CoreV1().Endpoints(namespace), - namespace: namespace, - } -} - -func (c *EndpointsClient) Get(name string) *corev1.Endpoints { - ginkgo.GinkgoHelper() - endpoints, err := c.EndpointsInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return endpoints -} - -// Create creates a new endpoints according to the framework specifications -func (c *EndpointsClient) Create(endpoints *corev1.Endpoints) *corev1.Endpoints { - ginkgo.GinkgoHelper() - e, err := c.EndpointsInterface.Create(context.TODO(), endpoints, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating endpoints") - return e.DeepCopy() -} - -// CreateSync creates a new endpoints according to the framework specifications, and waits for it to be updated. -func (c *EndpointsClient) CreateSync(endpoints *corev1.Endpoints, cond func(s *corev1.Endpoints) (bool, error), condDesc string) *corev1.Endpoints { - ginkgo.GinkgoHelper() - _ = c.Create(endpoints) - return c.WaitUntil(endpoints.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Patch patches the endpoints -func (c *EndpointsClient) Patch(original, modified *corev1.Endpoints) *corev1.Endpoints { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedEndpoints *corev1.Endpoints - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(_ context.Context) (bool, error) { - s, err := c.EndpointsInterface.Patch(context.TODO(), original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch endpoints %q", original.Name) - } - patchedEndpoints = s - return true, nil - }) - if err == nil { - return patchedEndpoints.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch endpoints %s", original.Name) - } - Failf("error occurred while retrying to patch endpoints %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the endpoints and waits for the endpoints to meet the condition -func (c *EndpointsClient) PatchSync(original, modified *corev1.Endpoints, cond func(s *corev1.Endpoints) (bool, error), condDesc string) *corev1.Endpoints { - ginkgo.GinkgoHelper() - _ = c.Patch(original, modified) - return c.WaitUntil(original.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Delete deletes the endpoints if it exists -func (c *EndpointsClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.EndpointsInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err !=
nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete endpoints %q: %v", name, err) - } -} - -// DeleteSync deletes the endpoints and waits for the endpoints to disappear for `timeout`. -// If the endpoints doesn't disappear before the timeout, it will fail the test. -func (c *EndpointsClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for endpoints %q to disappear", name) -} - -// WaitUntil waits the given timeout duration for the specified condition to be met. -func (c *EndpointsClient) WaitUntil(name string, cond func(s *corev1.Endpoints) (bool, error), condDesc string, _, timeout time.Duration) *corev1.Endpoints { - ginkgo.GinkgoHelper() - - var endpoints *corev1.Endpoints - err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(_ context.Context) (bool, error) { - Logf("Waiting for endpoints %s to meet condition %q", name, condDesc) - endpoints = c.Get(name).DeepCopy() - met, err := cond(endpoints) - if err != nil { - return false, fmt.Errorf("failed to check condition for endpoints %s: %w", name, err) - } - if met { - Logf("endpoints %s met condition %q", name, condDesc) - } else { - Logf("endpoints %s has not met condition %q", name, condDesc) - } - return met, nil - }) - if err == nil { - return endpoints - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while waiting for endpoints %s to meet condition", name) - } - Failf("error occurred while waiting for endpoints %s to meet condition: %v", name, err) - - return nil -} - -// WaitToDisappear waits the given timeout duration for the specified endpoints to disappear. -func (c *EndpointsClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*corev1.Endpoints, error) { - svc, err := c.EndpointsInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return svc, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected endpoints %s to not be found: %w", name, err) - } - return nil -} - -func MakeEndpoints(name string, annotations map[string]string, subset []corev1.EndpointSubset) *corev1.Endpoints { - return &corev1.Endpoints{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: annotations, - }, - Subsets: subset, - } -} diff --git a/test/e2e/framework/event.go b/test/e2e/framework/event.go deleted file mode 100644 index 1d9f28239be..00000000000 --- a/test/e2e/framework/event.go +++ /dev/null @@ -1,65 +0,0 @@ -package framework - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/wait" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - - "github.com/onsi/ginkgo/v2" -) - -type EventClient struct { - f *Framework - typedcorev1.EventInterface - namespace string -} - -func (f *Framework) EventClient() *EventClient { - return f.EventClientNS(f.Namespace.Name) -} - -func (f *Framework) EventClientNS(namespace string) *EventClient { - return &EventClient{ - f: f, - EventInterface: f.ClientSet.CoreV1().Events(namespace), - namespace: namespace, - } -} - -// WaitToHaveEvent waits for the provided resource to have the specified event(s) -func (c *EventClient) WaitToHaveEvent(kind, name, eventType, reason, sourceComponent, sourceHost string)
[]corev1.Event { - ginkgo.GinkgoHelper() - - var result []corev1.Event - err := wait.PollUntilContextTimeout(context.Background(), poll, timeout, false, func(ctx context.Context) (bool, error) { - Logf("Waiting for %s %s/%s to have event %s/%s", kind, c.namespace, name, eventType, reason) - selector := fields.Set{ - "involvedObject.kind": kind, - "involvedObject.name": name, - "type": eventType, - "reason": reason, - } - - events, err := c.List(ctx, metav1.ListOptions{FieldSelector: selector.AsSelector().String()}) - if err != nil { - return handleWaitingAPIError(err, true, "listing events") - } - for _, event := range events.Items { - if sourceComponent != "" && event.Source.Component != sourceComponent { - continue - } - if sourceHost != "" && event.Source.Host != sourceHost { - continue - } - result = append(result, event) - } - return len(result) != 0, nil - }) - - ExpectNoError(err) - return result -} diff --git a/test/e2e/framework/exec_utils.go b/test/e2e/framework/exec_utils.go deleted file mode 100644 index e8eb477745e..00000000000 --- a/test/e2e/framework/exec_utils.go +++ /dev/null @@ -1,38 +0,0 @@ -package framework - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// ExecCommandInContainer executes a command in the specified container. -func ExecCommandInContainer(f *Framework, namespace, pod, container string, cmd ...string) (string, string, error) { - return util.ExecuteCommandInContainer(f.ClientSet, f.ClientConfig(), namespace, pod, container, cmd...) -} - -// ExecShellInContainer executes the specified command on the pod's container. -func ExecShellInContainer(f *Framework, namespace, pod, container, cmd string) (string, string, error) { - return ExecCommandInContainer(f, namespace, pod, container, "/bin/sh", "-c", cmd) -} - -func execCommandInPod(ctx context.Context, f *Framework, namespace, pod string, cmd ...string) (string, string, error) { - ginkgo.GinkgoHelper() - - p, err := f.PodClientNS(namespace).Get(ctx, pod, metav1.GetOptions{}) - framework.ExpectNoError(err, "failed to get pod %s/%s", namespace, pod) - gomega.Expect(p.Spec.Containers).NotTo(gomega.BeEmpty()) - return ExecCommandInContainer(f, namespace, pod, p.Spec.Containers[0].Name, cmd...) -} - -// ExecShellInPod executes the specified command on the pod. -func ExecShellInPod(ctx context.Context, f *Framework, namespace, pod, cmd string) (string, string, error) { - ginkgo.GinkgoHelper() - return execCommandInPod(ctx, f, namespace, pod, "/bin/sh", "-c", cmd) -} diff --git a/test/e2e/framework/expect.go b/test/e2e/framework/expect.go deleted file mode 100644 index e26722901ef..00000000000 --- a/test/e2e/framework/expect.go +++ /dev/null @@ -1,223 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package framework - -import ( - "fmt" - "regexp" - "runtime" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/ginkgo/v2/types" - "github.com/onsi/gomega" - "github.com/onsi/gomega/format" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -var ( - macRegex = regexp.MustCompile(`^([0-9A-Fa-f]{2}:){5}([0-9A-Fa-f]{2})$`) - uuidRegex = regexp.MustCompile(`^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$`) -) - -func buildDescription(explain ...interface{}) string { - switch len(explain) { - case 0: - return "" - case 1: - if describe, ok := explain[0].(func() string); ok { - return describe() + "\n" - } - } - return fmt.Sprintf(explain[0].(string), explain[1:]...) + "\n" -} - -func buildExplainWithOffset(offset int, explain ...interface{}) string { - cl := types.NewCodeLocation(3) - _, file, line, _ := runtime.Caller(offset + 2) - description := buildDescription(explain...) - if cl.FileName == file && cl.LineNumber == line { - return description - } - - return description + fmt.Sprintf("Code Location: %s:%d", file, line) -} - -func buildExplain(explain ...interface{}) string { - return buildExplainWithOffset(1, explain...) -} - -// ExpectEqual expects the specified two are the same, otherwise an exception is raised -func ExpectEqual(actual, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), buildExplain(explain...)) -} - -// ExpectNotEqual expects the specified two are not the same, otherwise an exception is raised -func ExpectNotEqual(actual, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.Equal(extra), buildExplain(explain...)) -} - -// ExpectError expects an error happens, otherwise an exception is raised -func ExpectError(err error, explain ...interface{}) { - gomega.ExpectWithOffset(1, err).To(gomega.HaveOccurred(), buildExplain(explain...)) -} - -// ExpectNoError checks if "err" is set, and if so, fails assertion while logging the error. -func ExpectNoError(err error, explain ...interface{}) { - ExpectNoErrorWithOffset(1, err, buildExplain(explain...)) -} - -// ExpectNoErrorWithOffset checks if "err" is set, and if so, fails assertion while logging the error at "offset" levels above its caller -// (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f"). -func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { - if err == nil { - return - } - - // Errors usually contain unexported fields. We have to use - // a formatter here which can print those. - prefix := "" - if len(explain) > 0 { - if str, ok := explain[0].(string); ok { - prefix = fmt.Sprintf(str, explain[1:]...) + ": " - } else { - prefix = fmt.Sprintf("unexpected explain arguments, need format string: %v", explain) - } - } - - // This intentionally doesn't use gomega.Expect. Instead, we take - // full control over what information is presented where: - // - The complete error object is logged because it may contain - // additional information that isn't included in its error - // string. - // - It is not included in the failure message because - // it might make the failure message very large and/or - // cause error aggregation to work less well: two - // failures at the same code line might not be matched in - // https://go.k8s.io/triage because the error details are too - // different. - Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)) - Fail(prefix+err.Error(), 1+offset) -} - -// ExpectConsistOf expects actual contains precisely the extra elements.
-// The ordering of the elements does not matter. -func ExpectConsistOf(actual, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.ConsistOf(extra), buildExplain(explain...)) -} - -// ExpectContainElement expects actual contains the extra elements. -func ExpectContainElement(actual, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.ContainElement(extra), buildExplain(explain...)) -} - -// ExpectNotContainElement expects actual does not contain the extra elements. -func ExpectNotContainElement(actual, extra interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.ContainElement(extra), buildExplain(explain...)) -} - -// ExpectContainSubstring expects actual contains the passed-in substring. -func ExpectContainSubstring(actual, substr string, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.ContainSubstring(substr), buildExplain(explain...)) -} - -// ExpectNotContainSubstring expects actual does not contain the passed-in substring. -func ExpectNotContainSubstring(actual, substr string, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.ContainSubstring(substr), buildExplain(explain...)) -} - -// ExpectHaveKey expects the actual map has the key in the keyset -func ExpectHaveKey(actual, key interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.HaveKey(key), buildExplain(explain...)) -} - -// ExpectHaveKeyWithValue expects the actual map has the passed in key/value pair. -func ExpectHaveKeyWithValue(actual, key, value interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.HaveKeyWithValue(key, value), buildExplain(explain...)) -} - -// ExpectNotHaveKey expects the actual map does not have the key in the keyset -func ExpectNotHaveKey(actual, key interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.HaveKey(key), buildExplain(explain...)) -} - -// ExpectNil expects actual is nil -func ExpectNil(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeNil(), buildExplain(explain...)) -} - -// ExpectNotNil expects actual is not nil -func ExpectNotNil(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeNil(), buildExplain(explain...)) -} - -// ExpectEmpty expects actual is empty -func ExpectEmpty(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeEmpty(), buildExplain(explain...)) -} - -// ExpectNotEmpty expects actual is not empty -func ExpectNotEmpty(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeEmpty(), buildExplain(explain...)) -} - -// ExpectHaveLen expects actual has the passed-in length -func ExpectHaveLen(actual interface{}, count int, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.HaveLen(count), buildExplain(explain...)) -} - -// ExpectTrue expects actual is true -func ExpectTrue(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeTrue(), buildExplain(explain...)) -} - -func expectTrueWithOffset(offset int, actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeTrue(), buildExplainWithOffset(offset, explain...)) -} - -// ExpectFalse expects actual is false -func ExpectFalse(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, 
actual).NotTo(gomega.BeTrue(), buildExplain(explain...)) -} - -// ExpectZero expects actual is the zero value for its type or actual is nil. -func ExpectZero(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).To(gomega.BeZero(), buildExplain(explain...)) -} - -// ExpectNotZero expects actual is not nil nor the zero value for its type. -func ExpectNotZero(actual interface{}, explain ...interface{}) { - gomega.ExpectWithOffset(1, actual).NotTo(gomega.BeZero(), buildExplain(explain...)) -} - -// ExpectUUID expects that the given string is a UUID. -func ExpectUUID(s string) { - ginkgo.GinkgoHelper() - ginkgo.By(fmt.Sprintf("verifying the string %q is a UUID", s)) - expectTrueWithOffset(1, uuidRegex.MatchString(s)) -} - -// ExpectMAC expects that the given string is a MAC address. -func ExpectMAC(s string) { - ginkgo.GinkgoHelper() - ginkgo.By(fmt.Sprintf("verifying the string %q is a MAC address", s)) - expectTrueWithOffset(1, macRegex.MatchString(s)) -} - -// ExpectIPInCIDR expects that the given IP address is within the CIDR. -func ExpectIPInCIDR(ip, cidr string) { - ginkgo.GinkgoHelper() - ginkgo.By(fmt.Sprintf("verifying IP address %q is within the CIDR %q", ip, cidr)) - expectTrueWithOffset(1, util.CIDRContainIP(cidr, ip)) -} diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go deleted file mode 100644 index 7f86273d7ec..00000000000 --- a/test/e2e/framework/framework.go +++ /dev/null @@ -1,252 +0,0 @@ -package framework - -import ( - "context" - "fmt" - "os" - "strings" - "time" - - nad "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/utils/format" - admissionapi "k8s.io/pod-security-admission/api" - "kubevirt.io/client-go/kubecli" - - "github.com/onsi/ginkgo/v2" - - kubeovncs "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -const ( - IPv4 = "ipv4" - IPv6 = "ipv6" - Dual = "dual" -) - -const ( - // poll is how often to Poll resources. - poll = 2 * time.Second - - timeout = 2 * time.Minute -) - -type Framework struct { - KubeContext string - *framework.Framework - KubeOVNClientSet kubeovncs.Interface - KubeVirtClientSet kubecli.KubevirtClient - AttachNetClient nad.Interface - // master/release-1.10/...
- ClusterVersion string - // 999.999 for master - ClusterVersionMajor uint - ClusterVersionMinor uint - // ipv4/ipv6/dual - ClusterIPFamily string - // overlay/underlay/underlay-hairpin - ClusterNetworkMode string - KubeOVNImage string -} - -func dumpEvents(ctx context.Context, f *framework.Framework, namespace string) { - ginkgo.By("Dumping events in namespace " + namespace) - events, err := f.ClientSet.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{}) - if err != nil { - Logf("Failed to get events: %v", err) - return - } - for _, event := range events.Items { - event.ManagedFields = nil - fmt.Fprintln(ginkgo.GinkgoWriter, format.Object(event, 2)) - } -} - -func NewDefaultFramework(baseName string) *Framework { - f := &Framework{ - Framework: framework.NewDefaultFramework(baseName), - } - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - f.NamespacePodSecurityWarnLevel = admissionapi.LevelPrivileged - f.DumpAllNamespaceInfo = dumpEvents - f.ClusterIPFamily = os.Getenv("E2E_IP_FAMILY") - f.ClusterVersion = os.Getenv("E2E_BRANCH") - f.ClusterNetworkMode = os.Getenv("E2E_NETWORK_MODE") - - if strings.HasPrefix(f.ClusterVersion, "release-") { - n, err := fmt.Sscanf(f.ClusterVersion, "release-%d.%d", &f.ClusterVersionMajor, &f.ClusterVersionMinor) - if err != nil || n != 2 { - defer ginkgo.GinkgoRecover() - ginkgo.Fail(fmt.Sprintf("Failed to parse Kube-OVN version string %q", f.ClusterVersion)) - } - } else { - f.ClusterVersionMajor, f.ClusterVersionMinor = 999, 999 - } - - ginkgo.BeforeEach(f.BeforeEach) - - return f -} - -func (f *Framework) useContext() error { - if f.KubeContext == "" { - return nil - } - - pathOptions := clientcmd.NewDefaultPathOptions() - pathOptions.GlobalFile = framework.TestContext.KubeConfig - pathOptions.EnvVar = "" - - config, err := pathOptions.GetStartingConfig() - if err != nil { - return err - } - - if config.CurrentContext != f.KubeContext { - Logf("Switching context to " + f.KubeContext) - config.CurrentContext = f.KubeContext - if err = clientcmd.ModifyConfig(pathOptions, *config, true); err != nil { - return err - } - } - - return nil -} - -func NewFrameworkWithContext(baseName, kubeContext string) *Framework { - f := &Framework{KubeContext: kubeContext} - ginkgo.BeforeEach(f.BeforeEach) - f.Framework = framework.NewDefaultFramework(baseName) - ginkgo.BeforeEach(f.BeforeEach) - - f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - f.NamespacePodSecurityWarnLevel = admissionapi.LevelPrivileged - f.DumpAllNamespaceInfo = dumpEvents - f.ClusterIPFamily = os.Getenv("E2E_IP_FAMILY") - f.ClusterVersion = os.Getenv("E2E_BRANCH") - f.ClusterNetworkMode = os.Getenv("E2E_NETWORK_MODE") - - if strings.HasPrefix(f.ClusterVersion, "release-") { - n, err := fmt.Sscanf(f.ClusterVersion, "release-%d.%d", &f.ClusterVersionMajor, &f.ClusterVersionMinor) - if err != nil || n != 2 { - defer ginkgo.GinkgoRecover() - ginkgo.Fail(fmt.Sprintf("Failed to parse Kube-OVN version string %q", f.ClusterVersion)) - } - } else { - f.ClusterVersionMajor, f.ClusterVersionMinor = 999, 999 - } - - return f -} - -func (f *Framework) IsIPv4() bool { - return f.ClusterIPFamily == IPv4 -} - -func (f *Framework) IsIPv6() bool { - return f.ClusterIPFamily == IPv6 -} - -func (f *Framework) IsDual() bool { - return f.ClusterIPFamily == Dual -} - -func (f *Framework) HasIPv4() bool { - return !f.IsIPv6() -} - -func (f *Framework) HasIPv6() bool { - return !f.IsIPv4() -} - -// BeforeEach gets a kube-ovn client -func (f *Framework) BeforeEach() { - 
ginkgo.By("Setting kubernetes context") - ExpectNoError(f.useContext()) - - if f.KubeOVNClientSet == nil { - ginkgo.By("Creating a Kube-OVN client") - config, err := framework.LoadConfig() - ExpectNoError(err) - - config.QPS = f.Options.ClientQPS - config.Burst = f.Options.ClientBurst - f.KubeOVNClientSet, err = kubeovncs.NewForConfig(config) - ExpectNoError(err) - } - - if f.KubeVirtClientSet == nil { - ginkgo.By("Creating a KubeVirt client") - config, err := framework.LoadConfig() - ExpectNoError(err) - - config.QPS = f.Options.ClientQPS - config.Burst = f.Options.ClientBurst - f.KubeVirtClientSet, err = kubecli.GetKubevirtClientFromRESTConfig(config) - ExpectNoError(err) - } - - if f.AttachNetClient == nil { - ginkgo.By("Creating a network attachment definition client") - config, err := framework.LoadConfig() - ExpectNoError(err) - - config.QPS = f.Options.ClientQPS - config.Burst = f.Options.ClientBurst - f.AttachNetClient, err = nad.NewForConfig(config) - ExpectNoError(err) - } - - if f.KubeOVNImage == "" && f.ClientSet != nil { - framework.Logf("Getting Kube-OVN image") - f.KubeOVNImage = GetKubeOvnImage(f.ClientSet) - framework.Logf("Got Kube-OVN image: %s", f.KubeOVNImage) - } - - framework.TestContext.Host = "" -} - -func (f *Framework) VersionPriorTo(major, minor uint) bool { - return f.ClusterVersionMajor < major || (f.ClusterVersionMajor == major && f.ClusterVersionMinor < minor) -} - -func (f *Framework) SkipVersionPriorTo(major, minor uint, reason string) { - ginkgo.GinkgoHelper() - - if f.VersionPriorTo(major, minor) { - ginkgo.Skip(reason) - } -} - -func (f *Framework) ValidateFinalizers(obj metav1.Object) { - ginkgo.GinkgoHelper() - - finalizers := obj.GetFinalizers() - ExpectContainElement(finalizers, util.KubeOVNControllerFinalizer) - ExpectNotContainElement(finalizers, util.DepreciatedFinalizerName) -} - -func Describe(text string, body func()) bool { - return ginkgo.Describe("[CNI:Kube-OVN] "+text, ginkgo.Offset(1), body) -} - -func FDescribe(text string, body func()) bool { - return ginkgo.FDescribe("[CNI:Kube-OVN] "+text, ginkgo.Offset(1), body) -} - -func SerialDescribe(text string, body func()) bool { - return ginkgo.Describe("[CNI:Kube-OVN] "+text, ginkgo.Offset(1), ginkgo.Serial, body) -} - -func OrderedDescribe(text string, body func()) bool { - return ginkgo.Describe("[CNI:Kube-OVN] "+text, ginkgo.Offset(1), ginkgo.Ordered, body) -} - -var ConformanceIt func(args ...interface{}) bool = framework.ConformanceIt - -func DisruptiveIt(text string, body interface{}) bool { - return framework.It(text, ginkgo.Offset(1), body, framework.WithDisruptive()) -} diff --git a/test/e2e/framework/image.go b/test/e2e/framework/image.go deleted file mode 100644 index e2fd91f78a1..00000000000 --- a/test/e2e/framework/image.go +++ /dev/null @@ -1,6 +0,0 @@ -package framework - -const ( - PauseImage = "kubeovn/pause:3.9" - AgnhostImage = "kubeovn/agnhost:2.47" -) diff --git a/test/e2e/framework/ip.go b/test/e2e/framework/ip.go deleted file mode 100644 index 953044daa1a..00000000000 --- a/test/e2e/framework/ip.go +++ /dev/null @@ -1,149 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 
"github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// IPClient is a struct for IP client. -type IPClient struct { - f *Framework - v1.IPInterface -} - -func (f *Framework) IPClient() *IPClient { - return &IPClient{ - f: f, - IPInterface: f.KubeOVNClientSet.KubeovnV1().IPs(), - } -} - -func (c *IPClient) Get(name string) *apiv1.IP { - ginkgo.GinkgoHelper() - IP, err := c.IPInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return IP.DeepCopy() -} - -// Create creates a new IP according to the framework specifications -func (c *IPClient) Create(iP *apiv1.IP) *apiv1.IP { - ginkgo.GinkgoHelper() - iP, err := c.IPInterface.Create(context.TODO(), iP, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating IP") - return iP.DeepCopy() -} - -// CreateSync creates a new IP according to the framework specifications, and waits for it to be ready. -func (c *IPClient) CreateSync(iP *apiv1.IP) *apiv1.IP { - ginkgo.GinkgoHelper() - - iP = c.Create(iP) - ExpectTrue(c.WaitToBeReady(iP.Name, timeout)) - // Get the newest IP after it becomes ready - return c.Get(iP.Name).DeepCopy() -} - -// WaitToBeReady returns whether the IP is ready within timeout. -func (c *IPClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for IP %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - ip := c.Get(name) - if ip.Spec.V4IPAddress != "" || ip.Spec.V6IPAddress != "" { - Logf("IP %s is ready", name) - return true - } - Logf("IP %s is not ready", name) - } - Logf("IP %s was not ready within %v", name, timeout) - return false -} - -// Patch patches the IP -func (c *IPClient) Patch(original, modified *apiv1.IP, timeout time.Duration) *apiv1.IP { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedIP *apiv1.IP - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - p, err := c.IPInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch IP %q", original.Name) - } - patchedIP = p - return true, nil - }) - if err == nil { - return patchedIP.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch IP %s", original.Name) - } - Failf("error occurred while retrying to patch IP %s: %v", original.Name, err) - - return nil -} - -// Delete deletes a IP if the IP exists -func (c *IPClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.IPInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete IP %q: %v", name, err) - } -} - -// DeleteSync deletes the IP and waits for the IP to disappear for `timeout`. -// If the IP doesn't disappear before the timeout, it will fail the test. -func (c *IPClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn eip %q to disappear", name) -} - -// WaitToDisappear waits the given timeout duration for the specified IP to disappear. 
-func (c *IPClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IP, error) { - ip, err := c.IPInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return ip, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected IP %s to not be found: %w", name, err) - } - return nil -} - -func MakeIP(name, ns, subnet string) *apiv1.IP { - // pod ip name should including: pod name and namespace - // node ip name: only node name - IP := &apiv1.IP{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IPSpec{ - Namespace: ns, - Subnet: subnet, - }, - } - return IP -} diff --git a/test/e2e/framework/ippool.go b/test/e2e/framework/ippool.go deleted file mode 100644 index d7f523fd8e6..00000000000 --- a/test/e2e/framework/ippool.go +++ /dev/null @@ -1,273 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// IPPoolClient is a struct for ippool client. -type IPPoolClient struct { - f *Framework - v1.IPPoolInterface -} - -func (f *Framework) IPPoolClient() *IPPoolClient { - return &IPPoolClient{ - f: f, - IPPoolInterface: f.KubeOVNClientSet.KubeovnV1().IPPools(), - } -} - -func (c *IPPoolClient) Get(name string) *apiv1.IPPool { - ginkgo.GinkgoHelper() - ippool, err := c.IPPoolInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return ippool -} - -// Create creates a new ippool according to the framework specifications -func (c *IPPoolClient) Create(ippool *apiv1.IPPool) *apiv1.IPPool { - ginkgo.GinkgoHelper() - s, err := c.IPPoolInterface.Create(context.TODO(), ippool, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating ippool") - return s.DeepCopy() -} - -// CreateSync creates a new ippool according to the framework specifications, and waits for it to be ready. 
-func (c *IPPoolClient) CreateSync(ippool *apiv1.IPPool) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - s := c.Create(ippool) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest ippool after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Update updates the ippool -func (c *IPPoolClient) Update(ippool *apiv1.IPPool, options metav1.UpdateOptions, timeout time.Duration) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - var updatedIPPool *apiv1.IPPool - err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.IPPoolInterface.Update(ctx, ippool, options) - if err != nil { - return handleWaitingAPIError(err, false, "update ippool %q", ippool.Name) - } - updatedIPPool = s - return true, nil - }) - if err == nil { - return updatedIPPool.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to update ippool %s", ippool.Name) - } - Failf("error occurred while retrying to update ippool %s: %v", ippool.Name, err) - - return nil -} - -// UpdateSync updates the ippool and waits for the ippool to be ready for `timeout`. -// If the ippool doesn't become ready before the timeout, it will fail the test. -func (c *IPPoolClient) UpdateSync(ippool *apiv1.IPPool, options metav1.UpdateOptions, timeout time.Duration) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - s := c.Update(ippool, options, timeout) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest ippool after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Patch patches the ippool -func (c *IPPoolClient) Patch(original, modified *apiv1.IPPool, timeout time.Duration) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedIPPool *apiv1.IPPool - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.IPPoolInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch ippool %q", original.Name) - } - patchedIPPool = s - return true, nil - }) - if err == nil { - return patchedIPPool.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch ippool %s", original.Name) - } - Failf("error occurred while retrying to patch ippool %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the ippool and waits for the ippool to be ready for `timeout`. -// If the ippool doesn't become ready before the timeout, it will fail the test. -func (c *IPPoolClient) PatchSync(original, modified *apiv1.IPPool) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - s := c.Patch(original, modified, timeout) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest ippool after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Delete deletes a ippool if the ippool exists -func (c *IPPoolClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.IPPoolInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete ippool %q: %v", name, err) - } -} - -// DeleteSync deletes the ippool and waits for the ippool to disappear for `timeout`. 
-// If the ippool doesn't disappear before the timeout, it will fail the test.
-func (c *IPPoolClient) DeleteSync(name string) {
- ginkgo.GinkgoHelper()
- c.Delete(name)
- gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ippool %q to disappear", name)
-}
-
-func isIPPoolConditionSetAsExpected(ippool *apiv1.IPPool, conditionType apiv1.ConditionType, wantTrue, silent bool) bool {
- for _, cond := range ippool.Status.Conditions {
- if cond.Type == conditionType {
- if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) {
- return true
- }
- if !silent {
- Logf("Condition %s of ippool %s is %v instead of %t. Reason: %v, message: %v",
- conditionType, ippool.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
- }
- return false
- }
- }
- if !silent {
- Logf("Couldn't find condition %v on ippool %v", conditionType, ippool.Name)
- }
- return false
-}
-
-// IsIPPoolConditionSetAsExpected returns true if the ippool has a condition of the given type whose status matches wantTrue,
-// and otherwise returns false with detailed logging.
-func IsIPPoolConditionSetAsExpected(ippool *apiv1.IPPool, conditionType apiv1.ConditionType, wantTrue bool) bool {
- return isIPPoolConditionSetAsExpected(ippool, conditionType, wantTrue, false)
-}
-
-// WaitConditionToBe returns whether ippool "name's" condition state matches wantTrue
-// within timeout. If wantTrue is true, it will ensure the ippool condition status is
-// ConditionTrue; if it's false, it ensures the ippool condition is in any state other
-// than ConditionTrue (e.g. not true or unknown).
-func (c *IPPoolClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool {
- Logf("Waiting up to %v for ippool %s condition %s to be %t", timeout, name, conditionType, wantTrue)
- for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
- ippool := c.Get(name)
- if IsIPPoolConditionSetAsExpected(ippool, conditionType, wantTrue) {
- Logf("IPPool %s reached desired %t condition status", name, wantTrue)
- return true
- }
- Logf("IPPool %s has not yet reached desired %t condition status", name, wantTrue)
- }
- Logf("IPPool %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
- return false
-}
-
-// WaitToBeReady returns whether the ippool is ready within timeout.
-func (c *IPPoolClient) WaitToBeReady(name string, timeout time.Duration) bool {
- return c.WaitConditionToBe(name, apiv1.Ready, true, timeout)
-}
-
-// WaitToBeUpdated returns whether the ippool is updated within timeout.
-func (c *IPPoolClient) WaitToBeUpdated(ippool *apiv1.IPPool, timeout time.Duration) bool {
- Logf("Waiting up to %v for ippool %s to be updated", timeout, ippool.Name)
- rv, _ := big.NewInt(0).SetString(ippool.ResourceVersion, 10)
- for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
- s := c.Get(ippool.Name)
- if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 {
- Logf("IPPool %s updated", ippool.Name)
- return true
- }
- Logf("IPPool %s still not updated", ippool.Name)
- }
- Logf("IPPool %s was not updated within %v", ippool.Name, timeout)
- return false
-}
-
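A usage sketch, not taken from this patch: the pool name, address list, and namespace are illustrative, `f` is a *Framework, and MakeIPPool is the constructor defined at the end of this file.

poolClient := f.IPPoolClient()
pool := framework.MakeIPPool("pool-a", "ovn-default", []string{"10.16.0.10", "10.16.0.20"}, []string{"default"})
pool = poolClient.CreateSync(pool) // polls the Ready condition via WaitConditionToBe
// WaitUntil (below) polls an arbitrary predicate instead of a status condition:
pool = poolClient.WaitUntil(pool.Name, func(p *apiv1.IPPool) (bool, error) {
	return len(p.Status.Conditions) != 0, nil
}, "status conditions populated", 2*time.Second, time.Minute)

-// WaitUntil waits the given timeout duration for the specified condition to be met.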
-func (c *IPPoolClient) WaitUntil(name string, cond func(s *apiv1.IPPool) (bool, error), condDesc string, interval, timeout time.Duration) *apiv1.IPPool { - ginkgo.GinkgoHelper() - - var ippool *apiv1.IPPool - err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(_ context.Context) (bool, error) { - Logf("Waiting for ippool %s to meet condition %q", name, condDesc) - ippool = c.Get(name).DeepCopy() - met, err := cond(ippool) - if err != nil { - return false, fmt.Errorf("failed to check condition for ippool %s: %w", name, err) - } - return met, nil - }) - if err == nil { - return ippool - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while waiting for ippool %s to meet condition %q", name, condDesc) - } - Failf("error occurred while waiting for ippool %s to meet condition %q: %v", name, condDesc, err) - - return nil -} - -// WaitToDisappear waits the given timeout duration for the specified ippool to disappear. -func (c *IPPoolClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IPPool, error) { - ippool, err := c.IPPoolInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return ippool, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected ippool %s to not be found: %w", name, err) - } - return nil -} - -func MakeIPPool(name, subnet string, ips, namespaces []string) *apiv1.IPPool { - return &apiv1.IPPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IPPoolSpec{ - Subnet: subnet, - IPs: ips, - Namespaces: namespaces, - }, - } -} diff --git a/test/e2e/framework/iproute/iproute.go b/test/e2e/framework/iproute/iproute.go deleted file mode 100644 index ccab6c1097a..00000000000 --- a/test/e2e/framework/iproute/iproute.go +++ /dev/null @@ -1,178 +0,0 @@ -package iproute - -import ( - "encoding/json" - "fmt" - "net" - "reflect" - "strings" - - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" -) - -type LinkInfo struct { - InfoKind string `json:"info_kind"` -} - -type AddrInfo struct { - Family string `json:"family"` - Local string `json:"local"` - PrefixLen int `json:"prefixlen"` - Broadcast string `json:"broadcast,omitempty"` - Scope string `json:"scope"` - Label string `json:"label,omitempty"` - ValidLifeTime int64 `json:"valid_life_time"` - PreferredLifeTime int64 `json:"preferred_life_time"` - NoDAD bool `json:"nodad,omitempty"` -} - -type Link struct { - IfIndex int `json:"ifindex"` - LinkIndex int `json:"link_index"` - IfName string `json:"ifname"` - Flags []string `json:"flags"` - Mtu int `json:"mtu"` - Qdisc string `json:"qdisc"` - Master string `json:"master"` - OperState string `json:"operstate"` - Group string `json:"group"` - LinkType string `json:"link_type"` - Address string `json:"address"` - Broadcast string `json:"broadcast"` - LinkNetnsID int `json:"link_netnsid"` - Promiscuity int `json:"promiscuity"` - MinMtu int `json:"min_mtu"` - MaxMtu int `json:"max_mtu"` - LinkInfo LinkInfo `json:"linkinfo"` - NumTxQueues int `json:"num_tx_queues"` - NumRxQueues int `json:"num_rx_queues"` - GsoMaxSize int `json:"gso_max_size"` - GsoMaxSegs int `json:"gso_max_segs"` - AddrInfo []AddrInfo `json:"addr_info"` -} - -func (l *Link) NonLinkLocalAddresses() []string { - var result []string - for _, addr := range l.AddrInfo { - if !net.ParseIP(addr.Local).IsLinkLocalUnicast() { - 
result = append(result, fmt.Sprintf("%s/%d", addr.Local, addr.PrefixLen)) - } - } - return result -} - -type Route struct { - Type string `json:"type"` - Dst string `json:"dst"` - Gateway string `json:"gateway,omitempty"` - Dev string `json:"dev"` - Protocol string `json:"protocol"` - Scope string `json:"scope"` - Metric int `json:"metric"` - Flags []interface{} `json:"flags"` - PrefSrc string `json:"prefsrc,omitempty"` - Pref string `json:"pref"` -} - -type Rule struct { - Priority int `json:"priority"` - Src string `json:"src"` - Table string `json:"table"` - Protocol string `json:"protocol"` - SrcLen int `json:"srclen,omitempty"` -} - -type ExecFunc func(cmd ...string) (stdout, stderr []byte, err error) - -type execer struct { - fn ExecFunc - ignoredErrors []reflect.Type -} - -func (e *execer) exec(cmd string, result interface{}) error { - stdout, stderr, err := e.fn(strings.Fields(cmd)...) - if err != nil { - t := reflect.TypeOf(err) - for _, err := range e.ignoredErrors { - if t == err { - return nil - } - } - return fmt.Errorf("failed to exec cmd %q: %w\nstdout:\n%s\nstderr:\n%s", cmd, err, stdout, stderr) - } - - if result != nil { - if err = json.Unmarshal(stdout, result); err != nil { - return fmt.Errorf("failed to decode json %q: %w", string(stdout), err) - } - } - - return nil -} - -func devArg(device string) string { - if device == "" { - return "" - } - return " dev " + device -} - -func AddressShow(device string, execFunc ExecFunc) ([]Link, error) { - var links []Link - e := execer{fn: execFunc} - if err := e.exec("ip -d -j address show"+devArg(device), &links); err != nil { - return nil, err - } - - return links, nil -} - -func RouteShow(table, device string, execFunc ExecFunc) ([]Route, error) { - e := execer{fn: execFunc} - var args string - if table != "" { - // ignore the following error: - // Error: ipv4/ipv6: FIB table does not exist. 
- // Dump terminated
- e.ignoredErrors = append(e.ignoredErrors, reflect.TypeOf(docker.ErrNonZeroExitCode{}))
- args = " table " + table
- }
- args += devArg(device)
-
- var routes []Route
- if err := e.exec("ip -d -j route show"+args, &routes); err != nil {
- return nil, err
- }
-
- var routes6 []Route
- if err := e.exec("ip -d -j -6 route show"+args, &routes6); err != nil {
- return nil, err
- }
-
- return append(routes, routes6...), nil
-}
-
-func RouteDel(table, dst string, execFunc ExecFunc) error {
- e := execer{fn: execFunc}
- args := dst
- if table != "" {
- args += " table " + table
- }
-
- return e.exec("ip route del "+args, nil)
-}
-
-func RuleShow(device string, execFunc ExecFunc) ([]Rule, error) {
- e := execer{fn: execFunc}
-
- var rules []Rule
- if err := e.exec("ip -d -j rule show"+devArg(device), &rules); err != nil {
- return nil, err
- }
-
- var rules6 []Rule
- if err := e.exec("ip -d -j -6 rule show"+devArg(device), &rules6); err != nil {
- return nil, err
- }
- return append(rules, rules6...), nil
-}
diff --git a/test/e2e/framework/iptables-dnat.go b/test/e2e/framework/iptables-dnat.go
deleted file mode 100644
index 68f2d429544..00000000000
--- a/test/e2e/framework/iptables-dnat.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package framework
-
-import (
- "context"
- "errors"
- "fmt"
- "math/big"
- "time"
-
- apierrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
- "k8s.io/apimachinery/pkg/util/wait"
- "k8s.io/kubernetes/test/e2e/framework"
-
- "github.com/onsi/ginkgo/v2"
- "github.com/onsi/gomega"
-
- apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
- v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1"
- "github.com/kubeovn/kube-ovn/pkg/util"
-)
-
-// IptablesDnatClient is a struct for iptables dnat client.
-type IptablesDnatClient struct {
- f *Framework
- v1.IptablesDnatRuleInterface
-}
-
-func (f *Framework) IptablesDnatClient() *IptablesDnatClient {
- return &IptablesDnatClient{
- f: f,
- IptablesDnatRuleInterface: f.KubeOVNClientSet.KubeovnV1().IptablesDnatRules(),
- }
-}
-
-func (c *IptablesDnatClient) Get(name string) *apiv1.IptablesDnatRule {
- ginkgo.GinkgoHelper()
-
- dnat, err := c.IptablesDnatRuleInterface.Get(context.TODO(), name, metav1.GetOptions{})
- ExpectNoError(err)
- return dnat
-}
-
-// Create creates a new iptables dnat according to the framework specifications
-func (c *IptablesDnatClient) Create(dnat *apiv1.IptablesDnatRule) *apiv1.IptablesDnatRule {
- ginkgo.GinkgoHelper()
-
- dnat, err := c.IptablesDnatRuleInterface.Create(context.TODO(), dnat, metav1.CreateOptions{})
- ExpectNoError(err, "Error creating iptables dnat")
- return dnat.DeepCopy()
-}
-
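The iproute wrappers deleted just above were exercised through a per-node exec function, typically the kind.Node.Exec helper removed later in this patch. A rough sketch (the node variable and device name are illustrative, not from this patch):

links, err := iproute.AddressShow("eth0", node.Exec)
framework.ExpectNoError(err)
for _, link := range links {
	framework.Logf("%s: %v", link.IfName, link.NonLinkLocalAddresses())
}
routes, err := iproute.RouteShow("", "eth0", node.Exec) // returns IPv4 and IPv6 routes merged
framework.ExpectNoError(err)
framework.ExpectNotEmpty(routes)

-// CreateSync creates a new iptables dnat according to the framework specifications, and waits for it to be ready.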
-func (c *IptablesDnatClient) CreateSync(dnat *apiv1.IptablesDnatRule) *apiv1.IptablesDnatRule {
- ginkgo.GinkgoHelper()
-
- dnat = c.Create(dnat)
- ExpectTrue(c.WaitToBeReady(dnat.Name, timeout))
- // Get the newest iptables dnat after it becomes ready
- return c.Get(dnat.Name).DeepCopy()
-}
-
-// Patch patches the iptables dnat
-func (c *IptablesDnatClient) Patch(original, modified *apiv1.IptablesDnatRule) *apiv1.IptablesDnatRule {
- ginkgo.GinkgoHelper()
-
- patch, err := util.GenerateMergePatchPayload(original, modified)
- ExpectNoError(err)
-
- var patchedIptablesDnatRule *apiv1.IptablesDnatRule
- err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
- dnat, err := c.IptablesDnatRuleInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
- if err != nil {
- return handleWaitingAPIError(err, false, "patch iptables dnat %q", original.Name)
- }
- patchedIptablesDnatRule = dnat
- return true, nil
- })
- if err == nil {
- return patchedIptablesDnatRule.DeepCopy()
- }
-
- if errors.Is(err, context.DeadlineExceeded) {
- Failf("timed out while retrying to patch iptables DNAT rule %s", original.Name)
- }
- Failf("error occurred while retrying to patch iptables DNAT rule %s: %v", original.Name, err)
-
- return nil
-}
-
-// PatchSync patches the iptables dnat and waits for the iptables dnat to be ready for `timeout`.
-// If the iptables dnat doesn't become ready before the timeout, it will fail the test.
-func (c *IptablesDnatClient) PatchSync(original, modified *apiv1.IptablesDnatRule, _ []string, timeout time.Duration) *apiv1.IptablesDnatRule {
- ginkgo.GinkgoHelper()
-
- dnat := c.Patch(original, modified)
- ExpectTrue(c.WaitToBeUpdated(dnat, timeout))
- ExpectTrue(c.WaitToBeReady(dnat.Name, timeout))
- // Get the newest iptables dnat after it becomes ready
- return c.Get(dnat.Name).DeepCopy()
-}
-
-// Delete deletes an iptables dnat if the iptables dnat exists
-func (c *IptablesDnatClient) Delete(name string) {
- ginkgo.GinkgoHelper()
-
- err := c.IptablesDnatRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
- if err != nil && !apierrors.IsNotFound(err) {
- Failf("Failed to delete iptables dnat %q: %v", name, err)
- }
-}
-
-// DeleteSync deletes the iptables dnat and waits for the iptables dnat to disappear for `timeout`.
-// If the iptables dnat doesn't disappear before the timeout, it will fail the test.
-func (c *IptablesDnatClient) DeleteSync(name string) {
- ginkgo.GinkgoHelper()
- c.Delete(name)
- gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for iptables dnat %q to disappear", name)
-}
-
-// WaitToBeReady returns whether the iptables dnat is ready within timeout.
-func (c *IptablesDnatClient) WaitToBeReady(name string, timeout time.Duration) bool {
- for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
- if c.Get(name).Status.Ready {
- Logf("dnat %s is ready", name)
- return true
- }
- Logf("dnat %s is not ready", name)
- }
- return false
-}
-
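End to end, a spec typically combined these helpers as below (a sketch; the rule, EIP, ports, and internal address are illustrative, and MakeIptablesDnatRule is the constructor defined at the end of this file):

dnatClient := f.IptablesDnatClient()
dnat := framework.MakeIptablesDnatRule("dnat-a", "eip-a", "8888", "tcp", "10.16.0.10", "80")
dnat = dnatClient.CreateSync(dnat) // waits until .Status.Ready is set
defer dnatClient.DeleteSync(dnat.Name)

-// WaitToBeUpdated returns whether the iptables dnat is updated within timeout.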
-func (c *IptablesDnatClient) WaitToBeUpdated(dnat *apiv1.IptablesDnatRule, timeout time.Duration) bool { - Logf("Waiting up to %v for iptables dnat %s to be updated", timeout, dnat.Name) - rv, _ := big.NewInt(0).SetString(dnat.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(dnat.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("iptables dnat %s was not updated within %v", dnat.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified iptables DNAT rule to disappear. -func (c *IptablesDnatClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IptablesDnatRule, error) { - rule, err := c.IptablesDnatRuleInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return rule, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected iptables DNAT rule %s to not be found: %w", name, err) - } - return nil -} - -func MakeIptablesDnatRule(name, eip, externalPort, protocol, internalIP, internalPort string) *apiv1.IptablesDnatRule { - dnat := &apiv1.IptablesDnatRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IptablesDnatRuleSpec{ - EIP: eip, - ExternalPort: externalPort, - Protocol: protocol, - InternalIP: internalIP, - InternalPort: internalPort, - }, - } - return dnat -} diff --git a/test/e2e/framework/iptables-eip.go b/test/e2e/framework/iptables-eip.go deleted file mode 100644 index 34ece31d137..00000000000 --- a/test/e2e/framework/iptables-eip.go +++ /dev/null @@ -1,204 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// IptablesEIPClient is a struct for iptables eip client. -type IptablesEIPClient struct { - f *Framework - v1.IptablesEIPInterface -} - -func (f *Framework) IptablesEIPClient() *IptablesEIPClient { - return &IptablesEIPClient{ - f: f, - IptablesEIPInterface: f.KubeOVNClientSet.KubeovnV1().IptablesEIPs(), - } -} - -func (c *IptablesEIPClient) Get(name string) *apiv1.IptablesEIP { - ginkgo.GinkgoHelper() - eip, err := c.IptablesEIPInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return eip -} - -// Create creates a new iptables eip according to the framework specifications -func (c *IptablesEIPClient) Create(eip *apiv1.IptablesEIP) *apiv1.IptablesEIP { - ginkgo.GinkgoHelper() - eip, err := c.IptablesEIPInterface.Create(context.TODO(), eip, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating iptables eip") - return eip.DeepCopy() -} - -// CreateSync creates a new iptables eip according to the framework specifications, and waits for it to be ready. 
-func (c *IptablesEIPClient) CreateSync(eip *apiv1.IptablesEIP) *apiv1.IptablesEIP {
- ginkgo.GinkgoHelper()
-
- eip = c.Create(eip)
- ExpectTrue(c.WaitToBeReady(eip.Name, timeout))
- // Get the newest iptables eip after it becomes ready
- return c.Get(eip.Name).DeepCopy()
-}
-
-// Patch patches the iptables eip
-func (c *IptablesEIPClient) Patch(original, modified *apiv1.IptablesEIP) *apiv1.IptablesEIP {
- ginkgo.GinkgoHelper()
-
- patch, err := util.GenerateMergePatchPayload(original, modified)
- ExpectNoError(err)
-
- var patchedIptablesEIP *apiv1.IptablesEIP
- err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
- eip, err := c.IptablesEIPInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
- if err != nil {
- return handleWaitingAPIError(err, false, "patch iptables eip %q", original.Name)
- }
- patchedIptablesEIP = eip
- return true, nil
- })
- if err == nil {
- return patchedIptablesEIP.DeepCopy()
- }
-
- if errors.Is(err, context.DeadlineExceeded) {
- Failf("timed out while retrying to patch iptables EIP %s", original.Name)
- }
- Failf("error occurred while retrying to patch iptables EIP %s: %v", original.Name, err)
-
- return nil
-}
-
-// PatchSync patches the iptables eip and waits for the iptables eip to be ready for `timeout`.
-// If the iptables eip doesn't become ready before the timeout, it will fail the test.
-func (c *IptablesEIPClient) PatchSync(original, modified *apiv1.IptablesEIP, _ []string, timeout time.Duration) *apiv1.IptablesEIP {
- ginkgo.GinkgoHelper()
-
- eip := c.Patch(original, modified)
- ExpectTrue(c.WaitToBeUpdated(eip, timeout))
- ExpectTrue(c.WaitToBeReady(eip.Name, timeout))
- // Get the newest iptables eip after it becomes ready
- return c.Get(eip.Name).DeepCopy()
-}
-
-// PatchQoSPolicySync patches the QoS policy of the iptables eip and waits for the qos to be ready for `timeout`.
-// If the qos doesn't become ready before the timeout, it will fail the test.
-func (c *IptablesEIPClient) PatchQoSPolicySync(eipName, qosPolicyName string) *apiv1.IptablesEIP {
- ginkgo.GinkgoHelper()
-
- eip := c.Get(eipName)
- modifiedEIP := eip.DeepCopy()
- modifiedEIP.Spec.QoSPolicy = qosPolicyName
- _ = c.Patch(eip, modifiedEIP)
- ExpectTrue(c.WaitToQoSReady(eipName))
- return c.Get(eipName).DeepCopy()
-}
-
-// Delete deletes an iptables eip if the iptables eip exists
-func (c *IptablesEIPClient) Delete(name string) {
- ginkgo.GinkgoHelper()
- err := c.IptablesEIPInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
- if err != nil && !apierrors.IsNotFound(err) {
- Failf("Failed to delete iptables eip %q: %v", name, err)
- }
-}
-
-// DeleteSync deletes the iptables eip and waits for the iptables eip to disappear for `timeout`.
-// If the iptables eip doesn't disappear before the timeout, it will fail the test.
-func (c *IptablesEIPClient) DeleteSync(name string) {
- ginkgo.GinkgoHelper()
- c.Delete(name)
- gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for iptables eip %q to disappear", name)
-}
-
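The QoS flow this enables looked roughly like the following (a sketch; the EIP, gateway, and policy names are illustrative, and MakeIptablesEIP is the constructor defined at the end of this file):

eipClient := f.IptablesEIPClient()
eip := eipClient.CreateSync(framework.MakeIptablesEIP("eip-a", "", "", "", "gw-a", "", "qos-a"))
eip = eipClient.PatchQoSPolicySync(eip.Name, "qos-b") // switch policy and wait for .Status.QoSPolicy to converge
eip = eipClient.PatchQoSPolicySync(eip.Name, "")      // detach the policy again

-// WaitToBeReady returns whether the iptables eip is ready within timeout.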
-func (c *IptablesEIPClient) WaitToBeReady(name string, timeout time.Duration) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - eip := c.Get(name) - if eip.Status.Ready && eip.Status.IP != "" && eip.Spec.V4ip != "" { - Logf("eip %s is ready", name) - return true - } - Logf("eip %s is not ready", name) - } - return false -} - -// WaitToQoSReady returns whether the qos is ready within timeout. -func (c *IptablesEIPClient) WaitToQoSReady(name string) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - eip := c.Get(name) - if eip.Status.QoSPolicy == eip.Spec.QoSPolicy { - Logf("qos %s of eip %s is ready", eip.Spec.QoSPolicy, name) - return true - } - Logf("qos %s of eip %s is not ready", eip.Spec.QoSPolicy, name) - } - return false -} - -// WaitToBeUpdated returns whether the iptables eip is updated within timeout. -func (c *IptablesEIPClient) WaitToBeUpdated(eip *apiv1.IptablesEIP, timeout time.Duration) bool { - Logf("Waiting up to %v for iptables eip %s to be updated", timeout, eip.Name) - rv, _ := big.NewInt(0).SetString(eip.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(eip.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("iptables eip %s was not updated within %v", eip.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified iptables eip to disappear. -func (c *IptablesEIPClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IptablesEIP, error) { - eip, err := c.IptablesEIPInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return eip, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected iptables EIP %s to not be found: %w", name, err) - } - return nil -} - -func MakeIptablesEIP(name, v4ip, v6ip, mac, natGwDp, externalSubnet, qosPolicyName string) *apiv1.IptablesEIP { - eip := &apiv1.IptablesEIP{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IptablesEipSpec{ - V4ip: v4ip, - V6ip: v6ip, - MacAddress: mac, - NatGwDp: natGwDp, - }, - } - if externalSubnet != "" { - eip.Spec.ExternalSubnet = externalSubnet - } - eip.Spec.QoSPolicy = qosPolicyName - return eip -} diff --git a/test/e2e/framework/iptables-fip.go b/test/e2e/framework/iptables-fip.go deleted file mode 100644 index b3e420d7200..00000000000 --- a/test/e2e/framework/iptables-fip.go +++ /dev/null @@ -1,171 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// IptablesFIPClient is a struct for iptables fip client. 
-type IptablesFIPClient struct { - f *Framework - v1.IptablesFIPRuleInterface -} - -func (f *Framework) IptablesFIPClient() *IptablesFIPClient { - return &IptablesFIPClient{ - f: f, - IptablesFIPRuleInterface: f.KubeOVNClientSet.KubeovnV1().IptablesFIPRules(), - } -} - -func (c *IptablesFIPClient) Get(name string) *apiv1.IptablesFIPRule { - ginkgo.GinkgoHelper() - fip, err := c.IptablesFIPRuleInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return fip -} - -// Create creates a new iptables fip according to the framework specifications -func (c *IptablesFIPClient) Create(fip *apiv1.IptablesFIPRule) *apiv1.IptablesFIPRule { - ginkgo.GinkgoHelper() - fip, err := c.IptablesFIPRuleInterface.Create(context.TODO(), fip, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating iptables fip") - return fip.DeepCopy() -} - -// CreateSync creates a new iptables fip according to the framework specifications, and waits for it to be ready. -func (c *IptablesFIPClient) CreateSync(fip *apiv1.IptablesFIPRule) *apiv1.IptablesFIPRule { - ginkgo.GinkgoHelper() - - fip = c.Create(fip) - ExpectTrue(c.WaitToBeReady(fip.Name, timeout)) - // Get the newest iptables fip after it becomes ready - return c.Get(fip.Name).DeepCopy() -} - -// Patch patches the iptables fip -func (c *IptablesFIPClient) Patch(original, modified *apiv1.IptablesFIPRule) *apiv1.IptablesFIPRule { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedIptablesFIPRule *apiv1.IptablesFIPRule - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - fip, err := c.IptablesFIPRuleInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch iptables fip %q", original.Name) - } - patchedIptablesFIPRule = fip - return true, nil - }) - if err == nil { - return patchedIptablesFIPRule.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch iptables FIP rule %s", original.Name) - } - Failf("error occurred while retrying to patch iptables FIP rule %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the iptables fip and waits for the iptables fip to be ready for `timeout`. -// If the iptables fip doesn't become ready before the timeout, it will fail the test. -func (c *IptablesFIPClient) PatchSync(original, modified *apiv1.IptablesFIPRule, _ []string, timeout time.Duration) *apiv1.IptablesFIPRule { - ginkgo.GinkgoHelper() - - fip := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(fip, timeout)) - ExpectTrue(c.WaitToBeReady(fip.Name, timeout)) - // Get the newest iptables fip after it becomes ready - return c.Get(fip.Name).DeepCopy() -} - -// Delete deletes a iptables fip if the iptables fip exists -func (c *IptablesFIPClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.IptablesFIPRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete iptables fip %q: %v", name, err) - } -} - -// DeleteSync deletes the iptables fip and waits for the iptables fip to disappear for `timeout`. -// If the iptables fip doesn't disappear before the timeout, it will fail the test. 
-func (c *IptablesFIPClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for iptables fip %q to disappear", name) -} - -// WaitToBeReady returns whether the iptables fip is ready within timeout. -func (c *IptablesFIPClient) WaitToBeReady(name string, timeout time.Duration) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("fip %s is ready", name) - return true - } - Logf("fip %s is not ready", name) - } - return false -} - -// WaitToBeUpdated returns whether the iptables fip is updated within timeout. -func (c *IptablesFIPClient) WaitToBeUpdated(fip *apiv1.IptablesFIPRule, timeout time.Duration) bool { - Logf("Waiting up to %v for iptables fip %s to be updated", timeout, fip.Name) - rv, _ := big.NewInt(0).SetString(fip.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(fip.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("iptables fip %s was not updated within %v", fip.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified iptables FIP rule to disappear. -func (c *IptablesFIPClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IptablesFIPRule, error) { - rule, err := c.IptablesFIPRuleInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return rule, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected iptables FIP rule %s to not be found: %w", name, err) - } - return nil -} - -func MakeIptablesFIPRule(name, eip, internalIP string) *apiv1.IptablesFIPRule { - fip := &apiv1.IptablesFIPRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IptablesFIPRuleSpec{ - EIP: eip, - InternalIP: internalIP, - }, - } - return fip -} diff --git a/test/e2e/framework/iptables-snat.go b/test/e2e/framework/iptables-snat.go deleted file mode 100644 index 7c550334313..00000000000 --- a/test/e2e/framework/iptables-snat.go +++ /dev/null @@ -1,171 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// IptablesSnatClient is a struct for iptables snat client. 
-type IptablesSnatClient struct { - f *Framework - v1.IptablesSnatRuleInterface -} - -func (f *Framework) IptablesSnatClient() *IptablesSnatClient { - return &IptablesSnatClient{ - f: f, - IptablesSnatRuleInterface: f.KubeOVNClientSet.KubeovnV1().IptablesSnatRules(), - } -} - -func (c *IptablesSnatClient) Get(name string) *apiv1.IptablesSnatRule { - ginkgo.GinkgoHelper() - snat, err := c.IptablesSnatRuleInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return snat -} - -// Create creates a new iptables snat according to the framework specifications -func (c *IptablesSnatClient) Create(snat *apiv1.IptablesSnatRule) *apiv1.IptablesSnatRule { - ginkgo.GinkgoHelper() - snat, err := c.IptablesSnatRuleInterface.Create(context.TODO(), snat, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating iptables snat") - return snat.DeepCopy() -} - -// CreateSync creates a new iptables snat according to the framework specifications, and waits for it to be ready. -func (c *IptablesSnatClient) CreateSync(snat *apiv1.IptablesSnatRule) *apiv1.IptablesSnatRule { - ginkgo.GinkgoHelper() - - snat = c.Create(snat) - ExpectTrue(c.WaitToBeReady(snat.Name, timeout)) - // Get the newest iptables snat after it becomes ready - return c.Get(snat.Name).DeepCopy() -} - -// Patch patches the iptables snat -func (c *IptablesSnatClient) Patch(original, modified *apiv1.IptablesSnatRule) *apiv1.IptablesSnatRule { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedIptablesSnatRule *apiv1.IptablesSnatRule - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - snat, err := c.IptablesSnatRuleInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch iptables snat %q", original.Name) - } - patchedIptablesSnatRule = snat - return true, nil - }) - if err == nil { - return patchedIptablesSnatRule.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch iptables SNAT rule %s", original.Name) - } - Failf("error occurred while retrying to patch iptables SNAT rule %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the iptables snat and waits for the iptables snat to be ready for `timeout`. -// If the iptables snat doesn't become ready before the timeout, it will fail the test. -func (c *IptablesSnatClient) PatchSync(original, modified *apiv1.IptablesSnatRule, _ []string, timeout time.Duration) *apiv1.IptablesSnatRule { - ginkgo.GinkgoHelper() - - snat := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(snat, timeout)) - ExpectTrue(c.WaitToBeReady(snat.Name, timeout)) - // Get the newest iptables snat after it becomes ready - return c.Get(snat.Name).DeepCopy() -} - -// Delete deletes a iptables snat if the iptables snat exists -func (c *IptablesSnatClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.IptablesSnatRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete iptables snat %q: %v", name, err) - } -} - -// DeleteSync deletes the iptables snat and waits for the iptables snat to disappear for `timeout`. -// If the iptables snat doesn't disappear before the timeout, it will fail the test. 
-func (c *IptablesSnatClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for iptables snat %q to disappear", name) -} - -// WaitToBeReady returns whether the iptables snat is ready within timeout. -func (c *IptablesSnatClient) WaitToBeReady(name string, timeout time.Duration) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("snat %s is ready", name) - return true - } - Logf("snat %s is not ready", name) - } - return false -} - -// WaitToBeUpdated returns whether the iptables snat is updated within timeout. -func (c *IptablesSnatClient) WaitToBeUpdated(snat *apiv1.IptablesSnatRule, timeout time.Duration) bool { - Logf("Waiting up to %v for iptables snat %s to be updated", timeout, snat.Name) - rv, _ := big.NewInt(0).SetString(snat.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(snat.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("iptables snat %s was not updated within %v", snat.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified iptables SNAT rule to disappear. -func (c *IptablesSnatClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.IptablesSnatRule, error) { - rule, err := c.IptablesSnatRuleInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return rule, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected iptables SNAT rule %s to not be found: %w", name, err) - } - return nil -} - -func MakeIptablesSnatRule(name, eip, internalCIDR string) *apiv1.IptablesSnatRule { - snat := &apiv1.IptablesSnatRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.IptablesSnatRuleSpec{ - EIP: eip, - InternalCIDR: internalCIDR, - }, - } - return snat -} diff --git a/test/e2e/framework/iptables/iptables.go b/test/e2e/framework/iptables/iptables.go deleted file mode 100644 index 8dc7a04975c..00000000000 --- a/test/e2e/framework/iptables/iptables.go +++ /dev/null @@ -1,53 +0,0 @@ -package iptables - -import ( - "context" - "fmt" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func CheckIptablesRulesOnNode(f *framework.Framework, node, table, chain, protocol string, expectedRules []string, shouldExist bool) { - ovsPod := getOvsPodOnNode(f, node) - - iptBin := "iptables" - if protocol == apiv1.ProtocolIPv6 { - iptBin = "ip6tables" - } - - cmd := fmt.Sprintf(`%s -t %s -S `, iptBin, table) - if chain != "" { - cmd += chain - } - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - output := e2epodoutput.RunHostCmdOrDie(ovsPod.Namespace, ovsPod.Name, cmd) - rules := strings.Split(output, "\n") - for _, r := range expectedRules { - framework.Logf("checking rule %s", r) - ok, err := gomega.ContainElement(gomega.HavePrefix(r)).Match(rules) - if err != nil || ok != shouldExist { - return false, err - } - } - return 
true, nil - }, "") -} - -func getOvsPodOnNode(f *framework.Framework, node string) *corev1.Pod { - ginkgo.GinkgoHelper() - - daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - ds := daemonSetClient.Get("ovs-ovn") - pod, err := daemonSetClient.GetPodOnNode(ds, node) - framework.ExpectNoError(err) - return pod -} diff --git a/test/e2e/framework/kind/kind.go b/test/e2e/framework/kind/kind.go deleted file mode 100644 index b19c39ade37..00000000000 --- a/test/e2e/framework/kind/kind.go +++ /dev/null @@ -1,179 +0,0 @@ -package kind - -import ( - "context" - "errors" - "net" - "net/url" - "slices" - "strings" - "time" - - "github.com/docker/docker/api/types" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" -) - -const NetworkName = "kind" - -const ( - labelCluster = "io.x-k8s.kind.cluster" - labelRole = "io.x-k8s.kind.role" -) - -type Node struct { - types.Container -} - -func (n *Node) Name() string { - return strings.TrimPrefix(n.Names[0], "/") -} - -func (n *Node) Exec(cmd ...string) (stdout, stderr []byte, err error) { - return docker.Exec(n.ID, nil, cmd...) -} - -func (n *Node) NetworkConnect(networkID string) error { - for _, settings := range n.NetworkSettings.Networks { - if settings.NetworkID == networkID { - return nil - } - } - return docker.NetworkConnect(networkID, n.ID) -} - -func (n *Node) NetworkDisconnect(networkID string) error { - for _, settings := range n.NetworkSettings.Networks { - if settings.NetworkID == networkID { - return docker.NetworkDisconnect(networkID, n.ID) - } - } - return nil -} - -func (n *Node) ListLinks() ([]iproute.Link, error) { - return iproute.AddressShow("", n.Exec) -} - -func (n *Node) ListRoutes(nonLinkLocalUnicast bool) ([]iproute.Route, error) { - routes, err := iproute.RouteShow("", "", n.Exec) - if err != nil { - return nil, err - } - - if !nonLinkLocalUnicast { - return routes, nil - } - - result := make([]iproute.Route, 0, len(routes)) - for _, route := range routes { - if route.Dst == "default" { - result = append(result, route) - } - if ip := net.ParseIP(strings.Split(route.Dst, "/")[0]); !ip.IsLinkLocalUnicast() { - result = append(result, route) - } - } - return result, nil -} - -func (n *Node) WaitLinkToDisappear(linkName string, interval time.Duration, deadline time.Time) error { - err := wait.PollUntilContextTimeout(context.Background(), interval, time.Until(deadline), false, func(_ context.Context) (bool, error) { - framework.Logf("Waiting for link %s in node %s to disappear", linkName, n.Name()) - links, err := n.ListLinks() - if err != nil { - return false, err - } - for _, link := range links { - if link.IfName == linkName { - framework.Logf("link %s still exists", linkName) - return false, nil - } - } - framework.Logf("link %s no longer exists", linkName) - return true, nil - }) - if err == nil { - return nil - } - - if errors.Is(err, context.DeadlineExceeded) { - framework.Failf("timed out while waiting for link %s in node %s to disappear", linkName, n.Name()) - } - framework.Failf("error occurred while waiting for link %s in node %s to disappear: %v", linkName, n.Name(), err) - - return err -} - -func ListClusters() ([]string, error) { - filters := map[string][]string{"label": {labelCluster}} - nodeList, err := docker.ContainerList(filters) - if err != nil { - return nil, err - } - - var clusters []string - for _, node := range nodeList { - if cluster := 
node.Labels[labelCluster]; !slices.Contains(clusters, cluster) { - clusters = append(clusters, node.Labels[labelCluster]) - } - } - - return clusters, nil -} - -func ListNodes(cluster, role string) ([]Node, error) { - labels := []string{labelCluster + "=" + cluster} - if role != "" { - // control-plane or worker - labels = append(labels, labelRole+"="+role) - } - - filters := map[string][]string{"label": labels} - nodeList, err := docker.ContainerList(filters) - if err != nil { - return nil, err - } - - nodes := make([]Node, 0, len(nodeList)) - for _, node := range nodeList { - nodes = append(nodes, Node{node}) - } - - return nodes, nil -} - -func IsKindProvided(providerID string) (string, bool) { - // kind://docker/kube-ovn/kube-ovn-control-plane - u, err := url.Parse(providerID) - if err != nil || u.Scheme != "kind" || u.Host != "docker" { - return "", false - } - - fields := strings.Split(strings.Trim(u.Path, "/"), "/") - if len(fields) != 2 { - return "", false - } - return fields[0], true -} - -func NetworkConnect(networkID string, nodes []Node) error { - for _, node := range nodes { - if err := node.NetworkConnect(networkID); err != nil { - return err - } - } - return nil -} - -func NetworkDisconnect(networkID string, nodes []Node) error { - for _, node := range nodes { - if err := node.NetworkDisconnect(networkID); err != nil { - return err - } - } - return nil -} diff --git a/test/e2e/framework/kube-ovn.go b/test/e2e/framework/kube-ovn.go deleted file mode 100644 index 0d9c64b3b80..00000000000 --- a/test/e2e/framework/kube-ovn.go +++ /dev/null @@ -1,17 +0,0 @@ -package framework - -import ( - "context" - - "github.com/onsi/ginkgo/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" -) - -func GetKubeOvnImage(cs clientset.Interface) string { - ginkgo.GinkgoHelper() - ds, err := cs.AppsV1().DaemonSets(KubeOvnNamespace).Get(context.TODO(), DaemonSetOvsOvn, metav1.GetOptions{}) - ExpectNoError(err, "getting daemonset %s/%s", KubeOvnNamespace, DaemonSetOvsOvn) - ExpectNotNil(ds, "daemonset %s/%s not found", KubeOvnNamespace, DaemonSetOvsOvn) - return ds.Spec.Template.Spec.Containers[0].Image -} diff --git a/test/e2e/framework/kubectl.go b/test/e2e/framework/kubectl.go deleted file mode 100644 index 979a8facd0a..00000000000 --- a/test/e2e/framework/kubectl.go +++ /dev/null @@ -1,38 +0,0 @@ -package framework - -import ( - "fmt" - "strings" - - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" -) - -func KubectlExec(namespace, name string, cmd ...string) (stdout, stderr []byte, err error) { - c := strings.Join(cmd, " ") - outStr, errStr, err := e2epodoutput.RunHostCmdWithFullOutput(namespace, name, c) - if err != nil { - return nil, nil, fmt.Errorf("failed to exec cmd %q in %s of namespace %s: %w\nstderr:\n%s", c, name, namespace, err, errStr) - } - - return []byte(outStr), []byte(errStr), nil -} - -func ovnExecSvc(db string, cmd ...string) (stdout, stderr []byte, err error) { - c := strings.Join(cmd, " ") - outStr, errStr, err := e2epodoutput.RunHostCmdWithFullOutput(KubeOvnNamespace, "svc/ovn-"+db, c) - if err != nil { - return nil, nil, fmt.Errorf("failed to exec ovn %s cmd %q: %w\nstderr:\n%s", db, c, err, errStr) - } - - return []byte(outStr), []byte(errStr), nil -} - -// NBExec executes the command in svc/ovn-nb and returns the result -func NBExec(cmd ...string) (stdout, stderr []byte, err error) { - return ovnExecSvc("nb", cmd...) 
-} - -// SBExec executes the command in svc/ovn-sb and returns the result -func SBExec(cmd ...string) (stdout, stderr []byte, err error) { - return ovnExecSvc("sb", cmd...) -} diff --git a/test/e2e/framework/log.go b/test/e2e/framework/log.go deleted file mode 100644 index 4c8e9ddaa49..00000000000 --- a/test/e2e/framework/log.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "bytes" - "fmt" - "regexp" - "runtime/debug" - "time" - - "github.com/onsi/ginkgo/v2" -) - -func nowStamp() string { - return time.Now().Format(time.StampMilli) -} - -func log(level, format string, args ...interface{}) { - fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) -} - -// Logf logs the info. -func Logf(format string, args ...interface{}) { - log("INFO", format, args...) -} - -// Failf logs the fail info, including a stack trace starts with its direct caller -// (for example, for call chain f -> g -> Failf("foo", ...) error would be logged for "g"). -func Failf(format string, args ...interface{}) { - msg := fmt.Sprintf(format, args...) - skip := 1 - log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) - ginkgo.Fail(nowStamp()+": "+msg, skip) - panic("unreachable") -} - -// Fail is a replacement for ginkgo.Fail which logs the problem as it occurs -// together with a stack trace and then calls ginkgowrapper.Fail. -func Fail(msg string, callerSkip ...int) { - skip := 1 - if len(callerSkip) > 0 { - skip += callerSkip[0] - } - log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip)) - ginkgo.Fail(nowStamp()+": "+msg, skip) -} - -var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`) - -// PrunedStack is a wrapper around debug.Stack() that removes information -// about the current goroutine and optionally skips some of the initial stack entries. -// With skip == 0, the returned stack will start with the caller of PruneStack. -// From the remaining entries it automatically filters out useless ones like -// entries coming from Ginkgo. -// -// This is a modified copy of PruneStack in https://github.com/onsi/ginkgo/v2/blob/f90f37d87fa6b1dd9625e2b1e83c23ffae3de228/internal/codelocation/code_location.go#L25: -// - simplified API and thus renamed (calls debug.Stack() instead of taking a parameter) -// - source code filtering updated to be specific to Kubernetes -// - optimized to use bytes and in-place slice filtering from -// https://github.com/golang/go/wiki/SliceTricks#filter-in-place -func PrunedStack(skip int) []byte { - fullStackTrace := debug.Stack() - stack := bytes.Split(fullStackTrace, []byte("\n")) - // Ensure that the even entries are the method names and - // the odd entries the source code information. - if len(stack) > 0 && bytes.HasPrefix(stack[0], []byte("goroutine ")) { - // Ignore "goroutine 29 [running]:" line. 
- stack = stack[1:] - } - // The "+2" is for skipping over: - // - runtime/debug.Stack() - // - PrunedStack() - skip += 2 - if len(stack) > 2*skip { - stack = stack[2*skip:] - } - n := 0 - for i := 0; i < len(stack)/2; i++ { - // We filter out based on the source code file name. - if !codeFilterRE.Match(stack[i*2+1]) { - stack[n] = stack[i*2] - stack[n+1] = stack[i*2+1] - n += 2 - } - } - stack = stack[:n] - - return bytes.Join(stack, []byte("\n")) -} diff --git a/test/e2e/framework/namespace.go b/test/e2e/framework/namespace.go deleted file mode 100644 index 4812f5ab91d..00000000000 --- a/test/e2e/framework/namespace.go +++ /dev/null @@ -1,108 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// NamespaceClient is a struct for namespace client. -type NamespaceClient struct { - f *Framework - v1core.NamespaceInterface -} - -func (f *Framework) NamespaceClient() *NamespaceClient { - return &NamespaceClient{ - f: f, - NamespaceInterface: f.ClientSet.CoreV1().Namespaces(), - } -} - -func (c *NamespaceClient) Get(name string) *corev1.Namespace { - ginkgo.GinkgoHelper() - np, err := c.NamespaceInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return np -} - -// Create creates a new namespace according to the framework specifications -func (c *NamespaceClient) Create(ns *corev1.Namespace) *corev1.Namespace { - ginkgo.GinkgoHelper() - np, err := c.NamespaceInterface.Create(context.TODO(), ns, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating namespace") - return np.DeepCopy() -} - -func (c *NamespaceClient) Patch(original, modified *corev1.Namespace) *corev1.Namespace { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedNS *corev1.Namespace - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - ns, err := c.NamespaceInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch namespace %s", original.Name) - } - patchedNS = ns - return true, nil - }) - if err == nil { - return patchedNS.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch namespace %s", original.Name) - } - Failf("error occurred while retrying to patch namespace %s: %v", original.Name, err) - - return nil -} - -// Delete deletes a namespace if the namespace exists -func (c *NamespaceClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.NamespaceInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete namespace %q: %v", name, err) - } -} - -// DeleteSync deletes the namespace and waits for the namespace to disappear for `timeout`. -// If the namespace doesn't disappear before the timeout, it will fail the test. 
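-//
-// An illustrative call site (the namespace name here is hypothetical, not taken
-// from the tests):
-//
-//	f.NamespaceClient().DeleteSync("e2e-ns-demo")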
-func (c *NamespaceClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for namespace %q to disappear", name) -} - -// WaitToDisappear waits the given timeout duration for the specified namespace to disappear. -func (c *NamespaceClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*corev1.Namespace, error) { - policy, err := c.NamespaceInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return policy, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected namespace %s to not be found: %w", name, err) - } - return nil -} diff --git a/test/e2e/framework/network-attachment-definition.go b/test/e2e/framework/network-attachment-definition.go deleted file mode 100644 index ecfe68939e1..00000000000 --- a/test/e2e/framework/network-attachment-definition.go +++ /dev/null @@ -1,67 +0,0 @@ -package framework - -import ( - "context" - - apiv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - v1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/client/clientset/versioned/typed/k8s.cni.cncf.io/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/onsi/ginkgo/v2" -) - -// NetworkAttachmentDefinitionClient is a struct for nad client. -type NetworkAttachmentDefinitionClient struct { - f *Framework - v1.NetworkAttachmentDefinitionInterface -} - -func (f *Framework) NetworkAttachmentDefinitionClient() *NetworkAttachmentDefinitionClient { - return f.NetworkAttachmentDefinitionClientNS(f.Namespace.Name) -} - -func (f *Framework) NetworkAttachmentDefinitionClientNS(namespace string) *NetworkAttachmentDefinitionClient { - return &NetworkAttachmentDefinitionClient{ - f: f, - NetworkAttachmentDefinitionInterface: f.AttachNetClient.K8sCniCncfIoV1().NetworkAttachmentDefinitions(namespace), - } -} - -func (c *NetworkAttachmentDefinitionClient) Get(name string) *apiv1.NetworkAttachmentDefinition { - ginkgo.GinkgoHelper() - nad, err := c.NetworkAttachmentDefinitionInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return nad -} - -// Create creates a new nad according to the framework specifications -func (c *NetworkAttachmentDefinitionClient) Create(nad *apiv1.NetworkAttachmentDefinition) *apiv1.NetworkAttachmentDefinition { - ginkgo.GinkgoHelper() - nad, err := c.NetworkAttachmentDefinitionInterface.Create(context.TODO(), nad, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating nad") - return c.Get(nad.Name) -} - -// Delete deletes a nad if the nad exists -func (c *NetworkAttachmentDefinitionClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.NetworkAttachmentDefinitionInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if k8serrors.IsNotFound(err) { - return - } - ExpectNoError(err, "Error deleting nad") -} - -func MakeNetworkAttachmentDefinition(name, namespace, conf string) *apiv1.NetworkAttachmentDefinition { - nad := &apiv1.NetworkAttachmentDefinition{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: apiv1.NetworkAttachmentDefinitionSpec{ - Config: conf, - }, - } - return nad -} diff --git a/test/e2e/framework/network-policy.go 
b/test/e2e/framework/network-policy.go deleted file mode 100644 index 96802a16287..00000000000 --- a/test/e2e/framework/network-policy.go +++ /dev/null @@ -1,82 +0,0 @@ -package framework - -import ( - "context" - "fmt" - "time" - - netv1 "k8s.io/api/networking/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - v1net "k8s.io/client-go/kubernetes/typed/networking/v1" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" -) - -// NetworkPolicyClient is a struct for network policy client. -type NetworkPolicyClient struct { - f *Framework - v1net.NetworkPolicyInterface - namespace string -} - -func (f *Framework) NetworkPolicyClient() *NetworkPolicyClient { - return f.NetworkPolicyClientNS(f.Namespace.Name) -} - -func (f *Framework) NetworkPolicyClientNS(namespace string) *NetworkPolicyClient { - return &NetworkPolicyClient{ - f: f, - NetworkPolicyInterface: f.ClientSet.NetworkingV1().NetworkPolicies(namespace), - namespace: namespace, - } -} - -func (c *NetworkPolicyClient) Get(name string) *netv1.NetworkPolicy { - ginkgo.GinkgoHelper() - np, err := c.NetworkPolicyInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return np -} - -// Create creates a new network policy according to the framework specifications -func (c *NetworkPolicyClient) Create(netpol *netv1.NetworkPolicy) *netv1.NetworkPolicy { - ginkgo.GinkgoHelper() - np, err := c.NetworkPolicyInterface.Create(context.TODO(), netpol, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating network policy") - return np.DeepCopy() -} - -// Delete deletes a network policy if the network policy exists -func (c *NetworkPolicyClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.NetworkPolicyInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete network policy %q: %v", name, err) - } -} - -// DeleteSync deletes the network policy and waits for the network policy to disappear for `timeout`. -// If the network policy doesn't disappear before the timeout, it will fail the test. -func (c *NetworkPolicyClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for network policy %q to disappear", name) -} - -// WaitToDisappear waits the given timeout duration for the specified network policy to disappear. 
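-// DeleteSync above is simply Delete followed by this wait (hypothetical policy name):
-//
-//	c.Delete("np-demo")
-//	gomega.Expect(c.WaitToDisappear("np-demo", 2*time.Second, timeout)).To(gomega.Succeed())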
-func (c *NetworkPolicyClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*netv1.NetworkPolicy, error) { - policy, err := c.NetworkPolicyInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return policy, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected network policy %s to not be found: %w", name, err) - } - return nil -} diff --git a/test/e2e/framework/ovn-dnat.go b/test/e2e/framework/ovn-dnat.go deleted file mode 100644 index 3a8cb669da4..00000000000 --- a/test/e2e/framework/ovn-dnat.go +++ /dev/null @@ -1,179 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// OvnDnatRuleClient is a struct for ovn dnat client. -type OvnDnatRuleClient struct { - f *Framework - v1.OvnDnatRuleInterface -} - -func (f *Framework) OvnDnatRuleClient() *OvnDnatRuleClient { - return &OvnDnatRuleClient{ - f: f, - OvnDnatRuleInterface: f.KubeOVNClientSet.KubeovnV1().OvnDnatRules(), - } -} - -func (c *OvnDnatRuleClient) Get(name string) *apiv1.OvnDnatRule { - ginkgo.GinkgoHelper() - dnat, err := c.OvnDnatRuleInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return dnat -} - -// Create creates a new ovn dnat according to the framework specifications -func (c *OvnDnatRuleClient) Create(dnat *apiv1.OvnDnatRule) *apiv1.OvnDnatRule { - ginkgo.GinkgoHelper() - dnat, err := c.OvnDnatRuleInterface.Create(context.TODO(), dnat, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating ovn dnat") - return dnat.DeepCopy() -} - -// CreateSync creates a new ovn dnat according to the framework specifications, and waits for it to be ready. 
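-//
-// An illustrative flow (names are hypothetical; see MakeOvnDnatRule at the end of
-// this file for the parameter order):
-//
-//	dnat := MakeOvnDnatRule("dnat-demo", "eip-demo", "vip", "vip-demo", "", "", "80", "8080", "tcp")
-//	dnat = f.OvnDnatRuleClient().CreateSync(dnat)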
-func (c *OvnDnatRuleClient) CreateSync(dnat *apiv1.OvnDnatRule) *apiv1.OvnDnatRule { - ginkgo.GinkgoHelper() - - dnat = c.Create(dnat) - ExpectTrue(c.WaitToBeReady(dnat.Name, timeout)) - // Get the newest ovn dnat after it becomes ready - return c.Get(dnat.Name).DeepCopy() -} - -// Patch patches the ovn dnat -func (c *OvnDnatRuleClient) Patch(original, modified *apiv1.OvnDnatRule) *apiv1.OvnDnatRule { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedOvnDnatRule *apiv1.OvnDnatRule - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - dnat, err := c.OvnDnatRuleInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch ovn dnat %q", original.Name) - } - patchedOvnDnatRule = dnat - return true, nil - }) - if err == nil { - return patchedOvnDnatRule.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch OVN DNAT rule %s", original.Name) - } - Failf("error occurred while retrying to patch OVN DNAT rule %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the ovn dnat and waits for the ovn dnat to be ready for `timeout`. -// If the ovn dnat doesn't become ready before the timeout, it will fail the test. -func (c *OvnDnatRuleClient) PatchSync(original, modified *apiv1.OvnDnatRule, _ []string, timeout time.Duration) *apiv1.OvnDnatRule { - ginkgo.GinkgoHelper() - - dnat := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(dnat, timeout)) - ExpectTrue(c.WaitToBeReady(dnat.Name, timeout)) - // Get the newest ovn dnat after it becomes ready - return c.Get(dnat.Name).DeepCopy() -} - -// Delete deletes a ovn dnat if the ovn dnat exists -func (c *OvnDnatRuleClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.OvnDnatRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete ovn dnat %q: %v", name, err) - } -} - -// DeleteSync deletes the ovn dnat and waits for the ovn dnat to disappear for `timeout`. -// If the ovn dnat doesn't disappear before the timeout, it will fail the test. -func (c *OvnDnatRuleClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn dnat %q to disappear", name) -} - -// WaitToBeReady returns whether the ovn dnat is ready within timeout. -func (c *OvnDnatRuleClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn dnat %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("ovn dnat %s is ready", name) - return true - } - Logf("ovn dnat %s is not ready", name) - } - Logf("ovn dnat %s was not ready within %v", name, timeout) - return false -} - -// WaitToBeUpdated returns whether the ovn dnat is updated within timeout. 
-func (c *OvnDnatRuleClient) WaitToBeUpdated(dnat *apiv1.OvnDnatRule, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn dnat %s to be updated", timeout, dnat.Name) - rv, _ := big.NewInt(0).SetString(dnat.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(dnat.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("ovn dnat %s was not updated within %v", dnat.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified ovn dnat to disappear. -func (c *OvnDnatRuleClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.OvnDnatRule, error) { - rule, err := c.OvnDnatRuleInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return rule, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected OVN DNAT rule %s to not be found: %w", name, err) - } - return nil -} - -func MakeOvnDnatRule(name, ovnEip, ipType, ipName, vpc, v4Ip, internalPort, externalPort, protocol string) *apiv1.OvnDnatRule { - dnat := &apiv1.OvnDnatRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.OvnDnatRuleSpec{ - OvnEip: ovnEip, - IPType: ipType, - IPName: ipName, - Vpc: vpc, - V4Ip: v4Ip, - InternalPort: internalPort, - ExternalPort: externalPort, - Protocol: protocol, - }, - } - return dnat -} diff --git a/test/e2e/framework/ovn-eip.go b/test/e2e/framework/ovn-eip.go deleted file mode 100644 index 945b436de56..00000000000 --- a/test/e2e/framework/ovn-eip.go +++ /dev/null @@ -1,176 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// OvnEipClient is a struct for ovn eip client. -type OvnEipClient struct { - f *Framework - v1.OvnEipInterface -} - -func (f *Framework) OvnEipClient() *OvnEipClient { - return &OvnEipClient{ - f: f, - OvnEipInterface: f.KubeOVNClientSet.KubeovnV1().OvnEips(), - } -} - -func (c *OvnEipClient) Get(name string) *apiv1.OvnEip { - ginkgo.GinkgoHelper() - eip, err := c.OvnEipInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return eip -} - -// Create creates a new ovn eip according to the framework specifications -func (c *OvnEipClient) Create(eip *apiv1.OvnEip) *apiv1.OvnEip { - ginkgo.GinkgoHelper() - eip, err := c.OvnEipInterface.Create(context.TODO(), eip, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating ovn eip") - return eip.DeepCopy() -} - -// CreateSync creates a new ovn eip according to the framework specifications, and waits for it to be ready. 
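-//
-// An illustrative flow (names are hypothetical; the empty address and MAC fields are
-// left for the controller to allocate):
-//
-//	eip := MakeOvnEip("eip-demo", "external-demo", "", "", "", "")
-//	eip = f.OvnEipClient().CreateSync(eip)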
-func (c *OvnEipClient) CreateSync(eip *apiv1.OvnEip) *apiv1.OvnEip { - ginkgo.GinkgoHelper() - - eip = c.Create(eip) - ExpectTrue(c.WaitToBeReady(eip.Name, timeout)) - // Get the newest ovn eip after it becomes ready - return c.Get(eip.Name).DeepCopy() -} - -// Patch patches the ovn eip -func (c *OvnEipClient) Patch(original, modified *apiv1.OvnEip) *apiv1.OvnEip { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedOvnEip *apiv1.OvnEip - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - eip, err := c.OvnEipInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch ovn eip %q", original.Name) - } - patchedOvnEip = eip - return true, nil - }) - if err == nil { - return patchedOvnEip.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch OVN EIP %s", original.Name) - } - Failf("error occurred while retrying to patch OVN EIP %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the ovn eip and waits for the ovn eip to be ready for `timeout`. -// If the ovn eip doesn't become ready before the timeout, it will fail the test. -func (c *OvnEipClient) PatchSync(original, modified *apiv1.OvnEip, _ []string, timeout time.Duration) *apiv1.OvnEip { - ginkgo.GinkgoHelper() - - eip := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(eip, timeout)) - ExpectTrue(c.WaitToBeReady(eip.Name, timeout)) - // Get the newest ovn eip after it becomes ready - return c.Get(eip.Name).DeepCopy() -} - -// Delete deletes a ovn eip if the ovn eip exists -func (c *OvnEipClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.OvnEipInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete ovn eip %q: %v", name, err) - } -} - -// DeleteSync deletes the ovn eip and waits for the ovn eip to disappear for `timeout`. -// If the ovn eip doesn't disappear before the timeout, it will fail the test. -func (c *OvnEipClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn eip %q to disappear", name) -} - -// WaitToBeReady returns whether the ovn eip is ready within timeout. -func (c *OvnEipClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn eip %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("ovn eip %s is ready", name) - return true - } - Logf("ovn eip %s is not ready", name) - } - Logf("ovn eip %s was not ready within %v", name, timeout) - return false -} - -// WaitToBeUpdated returns whether the ovn eip is updated within timeout. 
-func (c *OvnEipClient) WaitToBeUpdated(eip *apiv1.OvnEip, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn eip %s to be updated", timeout, eip.Name) - rv, _ := big.NewInt(0).SetString(eip.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(eip.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("ovn eip %s was not updated within %v", eip.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified OVN EIP to disappear. -func (c *OvnEipClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.OvnEip, error) { - eip, err := c.OvnEipInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return eip, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected OVN EIP %s to not be found: %w", name, err) - } - return nil -} - -func MakeOvnEip(name, subnet, v4ip, v6ip, mac, usage string) *apiv1.OvnEip { - eip := &apiv1.OvnEip{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.OvnEipSpec{ - ExternalSubnet: subnet, - V4Ip: v4ip, - V6Ip: v6ip, - MacAddress: mac, - Type: usage, - }, - } - return eip -} diff --git a/test/e2e/framework/ovn-fip.go b/test/e2e/framework/ovn-fip.go deleted file mode 100644 index 8edfea2d3f3..00000000000 --- a/test/e2e/framework/ovn-fip.go +++ /dev/null @@ -1,176 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// OvnFipClient is a struct for ovn fip client. -type OvnFipClient struct { - f *Framework - v1.OvnFipInterface -} - -func (f *Framework) OvnFipClient() *OvnFipClient { - return &OvnFipClient{ - f: f, - OvnFipInterface: f.KubeOVNClientSet.KubeovnV1().OvnFips(), - } -} - -func (c *OvnFipClient) Get(name string) *apiv1.OvnFip { - ginkgo.GinkgoHelper() - fip, err := c.OvnFipInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return fip -} - -// Create creates a new ovn fip according to the framework specifications -func (c *OvnFipClient) Create(fip *apiv1.OvnFip) *apiv1.OvnFip { - ginkgo.GinkgoHelper() - fip, err := c.OvnFipInterface.Create(context.TODO(), fip, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating ovn fip") - return fip.DeepCopy() -} - -// CreateSync creates a new ovn fip according to the framework specifications, and waits for it to be ready. 
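-//
-// An illustrative flow binding a FIP to an existing OVN EIP (hypothetical names):
-//
-//	fip := MakeOvnFip("fip-demo", "eip-demo", "vip", "vip-demo", "", "")
-//	fip = f.OvnFipClient().CreateSync(fip)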
-func (c *OvnFipClient) CreateSync(fip *apiv1.OvnFip) *apiv1.OvnFip { - ginkgo.GinkgoHelper() - - fip = c.Create(fip) - ExpectTrue(c.WaitToBeReady(fip.Name, timeout)) - // Get the newest ovn fip after it becomes ready - return c.Get(fip.Name).DeepCopy() -} - -// Patch patches the ovn fip -func (c *OvnFipClient) Patch(original, modified *apiv1.OvnFip) *apiv1.OvnFip { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedOvnFip *apiv1.OvnFip - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - fip, err := c.OvnFipInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch ovn fip %q", original.Name) - } - patchedOvnFip = fip - return true, nil - }) - if err == nil { - return patchedOvnFip.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch OVN FIP %s", original.Name) - } - Failf("error occurred while retrying to patch OVN FIP %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the ovn fip and waits for the ovn fip to be ready for `timeout`. -// If the ovn fip doesn't become ready before the timeout, it will fail the test. -func (c *OvnFipClient) PatchSync(original, modified *apiv1.OvnFip, _ []string, timeout time.Duration) *apiv1.OvnFip { - ginkgo.GinkgoHelper() - - fip := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(fip, timeout)) - ExpectTrue(c.WaitToBeReady(fip.Name, timeout)) - // Get the newest ovn fip after it becomes ready - return c.Get(fip.Name).DeepCopy() -} - -// Delete deletes a ovn fip if the ovn fip exists -func (c *OvnFipClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.OvnFipInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete ovn fip %q: %v", name, err) - } -} - -// DeleteSync deletes the ovn fip and waits for the ovn fip to disappear for `timeout`. -// If the ovn fip doesn't disappear before the timeout, it will fail the test. -func (c *OvnFipClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn fip %q to disappear", name) -} - -// WaitToBeReady returns whether the ovn fip is ready within timeout. -func (c *OvnFipClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn fip %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("ovn fip %s is ready", name) - return true - } - Logf("ovn fip %s is not ready", name) - } - Logf("ovn fip %s was not ready within %v", name, timeout) - return false -} - -// WaitToBeUpdated returns whether the ovn fip is updated within timeout. 
-func (c *OvnFipClient) WaitToBeUpdated(fip *apiv1.OvnFip, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn fip %s to be updated", timeout, fip.Name) - rv, _ := big.NewInt(0).SetString(fip.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(fip.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("ovn fip %s was not updated within %v", fip.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified ovn fip to disappear. -func (c *OvnFipClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.OvnFip, error) { - fip, err := c.OvnFipInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return fip, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected OVN FIP %s to not be found: %w", name, err) - } - return nil -} - -func MakeOvnFip(name, ovnEip, ipType, ipName, vpc, v4Ip string) *apiv1.OvnFip { - fip := &apiv1.OvnFip{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.OvnFipSpec{ - OvnEip: ovnEip, - IPType: ipType, - IPName: ipName, - Vpc: vpc, - V4Ip: v4Ip, - }, - } - return fip -} diff --git a/test/e2e/framework/ovn-snat.go b/test/e2e/framework/ovn-snat.go deleted file mode 100644 index 95a94b46bd5..00000000000 --- a/test/e2e/framework/ovn-snat.go +++ /dev/null @@ -1,176 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// OvnSnatRuleClient is a struct for ovn snat client. -type OvnSnatRuleClient struct { - f *Framework - v1.OvnSnatRuleInterface -} - -func (f *Framework) OvnSnatRuleClient() *OvnSnatRuleClient { - return &OvnSnatRuleClient{ - f: f, - OvnSnatRuleInterface: f.KubeOVNClientSet.KubeovnV1().OvnSnatRules(), - } -} - -func (c *OvnSnatRuleClient) Get(name string) *apiv1.OvnSnatRule { - ginkgo.GinkgoHelper() - snat, err := c.OvnSnatRuleInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return snat -} - -// Create creates a new ovn snat according to the framework specifications -func (c *OvnSnatRuleClient) Create(snat *apiv1.OvnSnatRule) *apiv1.OvnSnatRule { - ginkgo.GinkgoHelper() - snat, err := c.OvnSnatRuleInterface.Create(context.TODO(), snat, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating ovn snat") - return snat.DeepCopy() -} - -// CreateSync creates a new ovn snat according to the framework specifications, and waits for it to be ready. 
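-//
-// An illustrative flow (hypothetical names; parameter order per MakeOvnSnatRule at
-// the end of this file):
-//
-//	snat := MakeOvnSnatRule("snat-demo", "eip-demo", "subnet-demo", "", "", "")
-//	snat = f.OvnSnatRuleClient().CreateSync(snat)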
-func (c *OvnSnatRuleClient) CreateSync(snat *apiv1.OvnSnatRule) *apiv1.OvnSnatRule { - ginkgo.GinkgoHelper() - - snat = c.Create(snat) - ExpectTrue(c.WaitToBeReady(snat.Name, timeout)) - // Get the newest ovn snat after it becomes ready - return c.Get(snat.Name).DeepCopy() -} - -// Patch patches the ovn snat -func (c *OvnSnatRuleClient) Patch(original, modified *apiv1.OvnSnatRule) *apiv1.OvnSnatRule { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedOvnSnatRule *apiv1.OvnSnatRule - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - snat, err := c.OvnSnatRuleInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch ovn snat %q", original.Name) - } - patchedOvnSnatRule = snat - return true, nil - }) - if err == nil { - return patchedOvnSnatRule.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch OVN SNAT rule %s", original.Name) - } - Failf("error occurred while retrying to patch OVN SNAT rule %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the ovn snat and waits for the ovn snat to be ready for `timeout`. -// If the ovn snat doesn't become ready before the timeout, it will fail the test. -func (c *OvnSnatRuleClient) PatchSync(original, modified *apiv1.OvnSnatRule, _ []string, timeout time.Duration) *apiv1.OvnSnatRule { - ginkgo.GinkgoHelper() - - snat := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(snat, timeout)) - ExpectTrue(c.WaitToBeReady(snat.Name, timeout)) - // Get the newest ovn snat after it becomes ready - return c.Get(snat.Name).DeepCopy() -} - -// Delete deletes a ovn snat if the ovn snat exists -func (c *OvnSnatRuleClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.OvnSnatRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete ovn snat %q: %v", name, err) - } -} - -// DeleteSync deletes the ovn snat and waits for the ovn snat to disappear for `timeout`. -// If the ovn snat doesn't disappear before the timeout, it will fail the test. -func (c *OvnSnatRuleClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn snat %q to disappear", name) -} - -// WaitToBeReady returns whether the ovn snat is ready within timeout. -func (c *OvnSnatRuleClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn snat %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("ovn snat %s is ready", name) - return true - } - Logf("ovn snat %s is not ready", name) - } - Logf("ovn snat %s was not ready within %v", name, timeout) - return false -} - -// WaitToBeUpdated returns whether the ovn snat is updated within timeout. 
-func (c *OvnSnatRuleClient) WaitToBeUpdated(snat *apiv1.OvnSnatRule, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn snat %s to be updated", timeout, snat.Name) - rv, _ := big.NewInt(0).SetString(snat.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(snat.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("ovn snat %s was not updated within %v", snat.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified OVN SNAT rule to disappear. -func (c *OvnSnatRuleClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.OvnSnatRule, error) { - rule, err := c.OvnSnatRuleInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return rule, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected OVN SNAT rule %s to not be found: %w", name, err) - } - return nil -} - -func MakeOvnSnatRule(name, ovnEip, vpcSubnet, ipName, vpc, v4IpCidr string) *apiv1.OvnSnatRule { - snat := &apiv1.OvnSnatRule{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.OvnSnatRuleSpec{ - OvnEip: ovnEip, - VpcSubnet: vpcSubnet, - IPName: ipName, - Vpc: vpc, - V4IpCidr: v4IpCidr, - }, - } - return snat -} diff --git a/test/e2e/framework/pod.go b/test/e2e/framework/pod.go deleted file mode 100644 index bc37c4a9863..00000000000 --- a/test/e2e/framework/pod.go +++ /dev/null @@ -1,144 +0,0 @@ -package framework - -import ( - "context" - "errors" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - psaapi "k8s.io/pod-security-admission/api" - "k8s.io/utils/ptr" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -type PodClient struct { - f *Framework - *e2epod.PodClient - namespace string -} - -func (f *Framework) PodClient() *PodClient { - return f.PodClientNS(f.Namespace.Name) -} - -func (f *Framework) PodClientNS(namespace string) *PodClient { - return &PodClient{f, e2epod.PodClientNS(f.Framework, namespace), namespace} -} - -func (c *PodClient) GetPod(name string) *corev1.Pod { - ginkgo.GinkgoHelper() - pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return pod -} - -func (c *PodClient) Create(pod *corev1.Pod) *corev1.Pod { - ginkgo.GinkgoHelper() - return c.PodClient.Create(context.Background(), pod) -} - -func (c *PodClient) CreateSync(pod *corev1.Pod) *corev1.Pod { - ginkgo.GinkgoHelper() - return c.PodClient.CreateSync(context.Background(), pod) -} - -func (c *PodClient) Delete(name string) error { - ginkgo.GinkgoHelper() - return c.PodClient.Delete(context.Background(), name, metav1.DeleteOptions{}) -} - -func (c *PodClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.PodClient.DeleteSync(context.Background(), name, metav1.DeleteOptions{GracePeriodSeconds: ptr.To(int64(1))}, timeout) -} - -func (c *PodClient) Patch(original, modified *corev1.Pod) *corev1.Pod { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedPod *corev1.Pod - err = 
wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - p, err := c.PodInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch pod %s/%s", original.Namespace, original.Name) - } - patchedPod = p - return true, nil - }) - if err == nil { - return patchedPod.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch pod %s/%s", original.Namespace, original.Name) - } - Failf("error occurred while retrying to patch pod %s/%s: %v", original.Namespace, original.Name, err) - - return nil -} - -func (c *PodClient) WaitForRunning(name string) { - ginkgo.GinkgoHelper() - err := e2epod.WaitTimeoutForPodRunningInNamespace(context.TODO(), c.f.ClientSet, name, c.namespace, timeout) - ExpectNoError(err) -} - -func (c *PodClient) WaitForNotFound(name string) { - ginkgo.GinkgoHelper() - err := e2epod.WaitForPodNotFoundInNamespace(context.TODO(), c.f.ClientSet, name, c.namespace, timeout) - ExpectNoError(err) -} - -func makePod(ns, name string, labels, annotations map[string]string, image string, command, args []string, securityLevel psaapi.Level) *corev1.Pod { - if image == "" { - image = PauseImage - } - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - Labels: labels, - Annotations: annotations, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "container", - Image: image, - ImagePullPolicy: corev1.PullIfNotPresent, - Command: command, - Args: args, - SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel), - }, - }, - SecurityContext: e2epod.GeneratePodSecurityContext(nil, nil), - TerminationGracePeriodSeconds: ptr.To(int64(3)), - }, - } - if securityLevel == psaapi.LevelRestricted { - pod = e2epod.MustMixinRestrictedPodSecurity(pod) - } - return pod -} - -func MakePod(ns, name string, labels, annotations map[string]string, image string, command, args []string) *corev1.Pod { - return makePod(ns, name, labels, annotations, image, command, args, psaapi.LevelBaseline) -} - -func MakeRestrictedPod(ns, name string, labels, annotations map[string]string, image string, command, args []string) *corev1.Pod { - return makePod(ns, name, labels, annotations, image, command, args, psaapi.LevelRestricted) -} - -func MakePrivilegedPod(ns, name string, labels, annotations map[string]string, image string, command, args []string) *corev1.Pod { - return makePod(ns, name, labels, annotations, image, command, args, psaapi.LevelPrivileged) -} diff --git a/test/e2e/framework/provider-network.go b/test/e2e/framework/provider-network.go deleted file mode 100644 index 76fdc007e7b..00000000000 --- a/test/e2e/framework/provider-network.go +++ /dev/null @@ -1,219 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// ProviderNetworkClient is a struct for provider network client. 
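-//
-// An illustrative construction (values are hypothetical; see MakeProviderNetwork at
-// the end of this file for the parameter meanings):
-//
-//	pn := MakeProviderNetwork("pn-demo", false, "eth1", nil, nil)
-//	pn = f.ProviderNetworkClient().CreateSync(pn)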
-type ProviderNetworkClient struct {
-	f *Framework
-	v1.ProviderNetworkInterface
-}
-
-func (f *Framework) ProviderNetworkClient() *ProviderNetworkClient {
-	return &ProviderNetworkClient{
-		f:                        f,
-		ProviderNetworkInterface: f.KubeOVNClientSet.KubeovnV1().ProviderNetworks(),
-	}
-}
-
-func (c *ProviderNetworkClient) Get(name string) *apiv1.ProviderNetwork {
-	ginkgo.GinkgoHelper()
-	pn, err := c.ProviderNetworkInterface.Get(context.TODO(), name, metav1.GetOptions{})
-	ExpectNoError(err)
-	return pn
-}
-
-// Create creates a new provider network according to the framework specifications
-func (c *ProviderNetworkClient) Create(pn *apiv1.ProviderNetwork) *apiv1.ProviderNetwork {
-	ginkgo.GinkgoHelper()
-	pn, err := c.ProviderNetworkInterface.Create(context.TODO(), pn, metav1.CreateOptions{})
-	ExpectNoError(err, "Error creating provider network")
-	return pn.DeepCopy()
-}
-
-// CreateSync creates a new provider network according to the framework specifications, and waits for it to be ready.
-func (c *ProviderNetworkClient) CreateSync(pn *apiv1.ProviderNetwork) *apiv1.ProviderNetwork {
-	ginkgo.GinkgoHelper()
-
-	pn = c.Create(pn)
-	ExpectTrue(c.WaitToBeReady(pn.Name, timeout))
-	// Get the newest provider network after it becomes ready
-	return c.Get(pn.Name).DeepCopy()
-}
-
-// Patch patches the provider network
-func (c *ProviderNetworkClient) Patch(original, modified *apiv1.ProviderNetwork) *apiv1.ProviderNetwork {
-	ginkgo.GinkgoHelper()
-
-	patch, err := util.GenerateMergePatchPayload(original, modified)
-	ExpectNoError(err)
-
-	var patchedProviderNetwork *apiv1.ProviderNetwork
-	err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
-		pn, err := c.ProviderNetworkInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
-		if err != nil {
-			return handleWaitingAPIError(err, false, "patch provider network %q", original.Name)
-		}
-		patchedProviderNetwork = pn
-		return true, nil
-	})
-	if err == nil {
-		return patchedProviderNetwork.DeepCopy()
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		Failf("timed out while retrying to patch provider network %s", original.Name)
-	}
-	Failf("error occurred while retrying to patch provider network %s: %v", original.Name, err)
-
-	return nil
-}
-
-// PatchSync patches the provider network and waits for the provider network to be ready for `timeout`.
-// If the provider network doesn't become ready before the timeout, it will fail the test.
-func (c *ProviderNetworkClient) PatchSync(original, modified *apiv1.ProviderNetwork, _ []string, timeout time.Duration) *apiv1.ProviderNetwork {
-	ginkgo.GinkgoHelper()
-
-	pn := c.Patch(original, modified)
-	ExpectTrue(c.WaitToBeUpdated(pn, timeout))
-	ExpectTrue(c.WaitToBeReady(pn.Name, timeout))
-	// Get the newest provider network after it becomes ready
-	return c.Get(pn.Name).DeepCopy()
-}
-
-// Delete deletes a provider network if the provider network exists
-func (c *ProviderNetworkClient) Delete(name string) {
-	ginkgo.GinkgoHelper()
-	err := c.ProviderNetworkInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
-	if err != nil && !apierrors.IsNotFound(err) {
-		Failf("Failed to delete provider network %q: %v", name, err)
-	}
-}
-
-// DeleteSync deletes the provider network and waits for the provider network to disappear for `timeout`.
-// If the provider network doesn't disappear before the timeout, it will fail the test. 
-func (c *ProviderNetworkClient) DeleteSync(name string) {
-	ginkgo.GinkgoHelper()
-	c.Delete(name)
-	gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for provider network %q to disappear", name)
-}
-
-func isProviderNetworkConditionSetAsExpected(pn *apiv1.ProviderNetwork, node string, conditionType apiv1.ConditionType, wantTrue, silent bool) bool {
-	for _, cond := range pn.Status.Conditions {
-		if cond.Node == node && cond.Type == conditionType {
-			if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) {
-				return true
-			}
-			if !silent {
-				Logf("Condition %s for node %s of provider network %s is %v instead of %t. Reason: %v, message: %v",
-					conditionType, node, pn.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
-			}
-			return false
-		}
-	}
-	if !silent {
-		Logf("Couldn't find condition %v of node %s on provider network %v", conditionType, node, pn.Name)
-	}
-	return false
-}
-
-// IsProviderNetworkConditionSetAsExpected returns a wantTrue value if the provider network has a match to the conditionType,
-// otherwise returns an opposite value of the wantTrue with detailed logging.
-func IsProviderNetworkConditionSetAsExpected(pn *apiv1.ProviderNetwork, node string, conditionType apiv1.ConditionType, wantTrue bool) bool {
-	return isProviderNetworkConditionSetAsExpected(pn, node, conditionType, wantTrue, false)
-}
-
-// WaitConditionToBe returns whether provider network "name's" condition state matches wantTrue
-// within timeout. If wantTrue is true, it will ensure the provider network condition status is
-// ConditionTrue; if it's false, it ensures the provider network condition is in any state other
-// than ConditionTrue (e.g. not true or unknown).
-func (c *ProviderNetworkClient) WaitConditionToBe(name, node string, conditionType apiv1.ConditionType, wantTrue bool, deadline time.Time) bool {
-	timeout := time.Until(deadline)
-	Logf("Waiting up to %v for provider network %s condition %s of node %s to be %t", timeout, name, conditionType, node, wantTrue)
-	for ; time.Now().Before(deadline); time.Sleep(poll) {
-		if pn := c.Get(name); IsProviderNetworkConditionSetAsExpected(pn, node, conditionType, wantTrue) {
-			return true
-		}
-	}
-	Logf("ProviderNetwork %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
-	return false
-}
-
-// WaitToBeReady returns whether the provider network is ready within timeout.
-func (c *ProviderNetworkClient) WaitToBeReady(name string, timeout time.Duration) bool {
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		if c.Get(name).Status.Ready {
-			return true
-		}
-	}
-	return false
-}
-
-// WaitToBeUpdated returns whether the provider network is updated within timeout.
-func (c *ProviderNetworkClient) WaitToBeUpdated(pn *apiv1.ProviderNetwork, timeout time.Duration) bool {
-	Logf("Waiting up to %v for provider network %s to be updated", timeout, pn.Name)
-	rv, _ := big.NewInt(0).SetString(pn.ResourceVersion, 10)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		s := c.Get(pn.Name)
-		if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 {
-			return true
-		}
-	}
-	Logf("ProviderNetwork %s was not updated within %v", pn.Name, timeout)
-	return false
-}
-
-// WaitToDisappear waits the given timeout duration for the specified provider network to disappear. 
-func (c *ProviderNetworkClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.ProviderNetwork, error) { - pn, err := c.ProviderNetworkInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return pn, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected provider network %s to not be found: %w", name, err) - } - return nil -} - -func MakeProviderNetwork(name string, exchangeLinkName bool, defaultInterface string, customInterfaces map[string][]string, excludeNodes []string) *apiv1.ProviderNetwork { - pn := &apiv1.ProviderNetwork{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.ProviderNetworkSpec{ - DefaultInterface: defaultInterface, - ExcludeNodes: excludeNodes, - ExchangeLinkName: exchangeLinkName, - }, - } - for iface, nodes := range customInterfaces { - ci := apiv1.CustomInterface{ - Interface: iface, - Nodes: nodes, - } - pn.Spec.CustomInterfaces = append(pn.Spec.CustomInterfaces, ci) - } - return pn -} diff --git a/test/e2e/framework/qos-policy.go b/test/e2e/framework/qos-policy.go deleted file mode 100644 index 5cb2696e0ab..00000000000 --- a/test/e2e/framework/qos-policy.go +++ /dev/null @@ -1,305 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "reflect" - "sort" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// QoSPolicyClient is a struct for qosPolicy client. -type QoSPolicyClient struct { - f *Framework - v1.QoSPolicyInterface -} - -func (f *Framework) QoSPolicyClient() *QoSPolicyClient { - return &QoSPolicyClient{ - f: f, - QoSPolicyInterface: f.KubeOVNClientSet.KubeovnV1().QoSPolicies(), - } -} - -func (c *QoSPolicyClient) Get(name string) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - qosPolicy, err := c.QoSPolicyInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return qosPolicy -} - -// Create creates a new qosPolicy according to the framework specifications -func (c *QoSPolicyClient) Create(qosPolicy *apiv1.QoSPolicy) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - s, err := c.QoSPolicyInterface.Create(context.TODO(), qosPolicy, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating qosPolicy") - return s.DeepCopy() -} - -// CreateSync creates a new qosPolicy according to the framework specifications, and waits for it to be ready. 
-func (c *QoSPolicyClient) CreateSync(qosPolicy *apiv1.QoSPolicy) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - - s := c.Create(qosPolicy) - ExpectTrue(c.WaitToQoSReady(s.Name)) - // Get the newest qosPolicy after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Update updates the qosPolicy -func (c *QoSPolicyClient) Update(qosPolicy *apiv1.QoSPolicy, options metav1.UpdateOptions, timeout time.Duration) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - - var updatedQoSPolicy *apiv1.QoSPolicy - err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.QoSPolicyInterface.Update(ctx, qosPolicy, options) - if err != nil { - return handleWaitingAPIError(err, false, "update qosPolicy %q", qosPolicy.Name) - } - updatedQoSPolicy = s - return true, nil - }) - if err == nil { - return updatedQoSPolicy.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to update qosPolicy %s", qosPolicy.Name) - } - Failf("error occurred while retrying to update qosPolicy %s: %v", qosPolicy.Name, err) - - return nil -} - -// UpdateSync updates the qosPolicy and waits for the qosPolicy to be ready for `timeout`. -// If the qosPolicy doesn't become ready before the timeout, it will fail the test. -func (c *QoSPolicyClient) UpdateSync(qosPolicy *apiv1.QoSPolicy, options metav1.UpdateOptions, timeout time.Duration) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - - s := c.Update(qosPolicy, options, timeout) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest qosPolicy after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Patch patches the qosPolicy -func (c *QoSPolicyClient) Patch(original, modified *apiv1.QoSPolicy) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedQoSPolicy *apiv1.QoSPolicy - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.QoSPolicyInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch qosPolicy %q", original.Name) - } - patchedQoSPolicy = s - return true, nil - }) - if err == nil { - return patchedQoSPolicy.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch qosPolicy %s", original.Name) - } - Failf("error occurred while retrying to patch qosPolicy %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the qosPolicy and waits for the qosPolicy to be ready for `timeout`. -// If the qosPolicy doesn't become ready before the timeout, it will fail the test. 
-func (c *QoSPolicyClient) PatchSync(original, modified *apiv1.QoSPolicy) *apiv1.QoSPolicy { - ginkgo.GinkgoHelper() - - s := c.Patch(original, modified) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest qosPolicy after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Delete deletes a qosPolicy if the qosPolicy exists -func (c *QoSPolicyClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.QoSPolicyInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete qosPolicy %q: %v", name, err) - } -} - -// DeleteSync deletes the qosPolicy and waits for the qosPolicy to disappear for `timeout`. -// If the qosPolicy doesn't disappear before the timeout, it will fail the test. -func (c *QoSPolicyClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for qosPolicy %q to disappear", name) -} - -func isQoSPolicyConditionSetAsExpected(qosPolicy *apiv1.QoSPolicy, conditionType apiv1.ConditionType, wantTrue, silent bool) bool { - for _, cond := range qosPolicy.Status.Conditions { - if cond.Type == conditionType { - if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) { - return true - } - if !silent { - Logf("Condition %s of qosPolicy %s is %v instead of %t. Reason: %v, message: %v", - conditionType, qosPolicy.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message) - } - return false - } - } - if !silent { - Logf("Couldn't find condition %v on qosPolicy %v", conditionType, qosPolicy.Name) - } - return false -} - -// IsQoSPolicyConditionSetAsExpected returns a wantTrue value if the qosPolicy has a match to the conditionType, -// otherwise returns an opposite value of the wantTrue with detailed logging. -func IsQoSPolicyConditionSetAsExpected(qosPolicy *apiv1.QoSPolicy, conditionType apiv1.ConditionType, wantTrue bool) bool { - return isQoSPolicyConditionSetAsExpected(qosPolicy, conditionType, wantTrue, false) -} - -// WaitConditionToBe returns whether qosPolicy "name's" condition state matches wantTrue -// within timeout. If wantTrue is true, it will ensure the qosPolicy condition status is -// ConditionTrue; if it's false, it ensures the qosPolicy condition is in any state other -// than ConditionTrue (e.g. not true or unknown). -func (c *QoSPolicyClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool { - Logf("Waiting up to %v for qosPolicy %s condition %s to be %t", timeout, name, conditionType, wantTrue) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - qosPolicy := c.Get(name) - if IsQoSPolicyConditionSetAsExpected(qosPolicy, conditionType, wantTrue) { - Logf("QoSPolicy %s reach desired %t condition status", name, wantTrue) - return true - } - Logf("QoSPolicy %s still not reach desired %t condition status", name, wantTrue) - } - Logf("QoSPolicy %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) - return false -} - -// WaitToBeReady returns whether the qosPolicy is ready within timeout. -func (c *QoSPolicyClient) WaitToBeReady(name string, timeout time.Duration) bool { - return c.WaitConditionToBe(name, apiv1.Ready, true, timeout) -} - -// WaitToBeUpdated returns whether the qosPolicy is updated within timeout. 
-func (c *QoSPolicyClient) WaitToBeUpdated(qosPolicy *apiv1.QoSPolicy, timeout time.Duration) bool {
-	Logf("Waiting up to %v for qosPolicy %s to be updated", timeout, qosPolicy.Name)
-	rv, _ := big.NewInt(0).SetString(qosPolicy.ResourceVersion, 10)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		s := c.Get(qosPolicy.Name)
-		if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 {
-			Logf("QoSPolicy %s updated", qosPolicy.Name)
-			return true
-		}
-		Logf("QoSPolicy %s still not updated", qosPolicy.Name)
-	}
-	Logf("QoSPolicy %s was not updated within %v", qosPolicy.Name, timeout)
-	return false
-}
-
-// WaitUntil waits the given timeout duration for the specified condition to be met.
-func (c *QoSPolicyClient) WaitUntil(name string, cond func(s *apiv1.QoSPolicy) (bool, error), condDesc string, interval, timeout time.Duration) *apiv1.QoSPolicy {
-	var qosPolicy *apiv1.QoSPolicy
-	err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(_ context.Context) (bool, error) {
-		Logf("Waiting for qosPolicy %s to meet condition %q", name, condDesc)
-		qosPolicy = c.Get(name).DeepCopy()
-		met, err := cond(qosPolicy)
-		if err != nil {
-			return false, fmt.Errorf("failed to check condition for qosPolicy %s: %w", name, err)
-		}
-		return met, nil
-	})
-	if err == nil {
-		return qosPolicy
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		Failf("timed out while waiting for qosPolicy %s to meet condition %q", name, condDesc)
-	}
-	Failf("error occurred while waiting for qosPolicy %s to meet condition %q: %v", name, condDesc, err)
-
-	return nil
-}
-
-// WaitToDisappear waits the given timeout duration for the specified qosPolicy to disappear.
-func (c *QoSPolicyClient) WaitToDisappear(name string, _, timeout time.Duration) error {
-	err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.QoSPolicy, error) {
-		qosPolicy, err := c.QoSPolicyInterface.Get(ctx, name, metav1.GetOptions{})
-		if apierrors.IsNotFound(err) {
-			return nil, nil
-		}
-		return qosPolicy, err
-	})).WithTimeout(timeout).Should(gomega.BeNil())
-	if err != nil {
-		return fmt.Errorf("expected qosPolicy %s to not be found: %w", name, err)
-	}
-	return nil
-}
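WaitToQoSReady below is the readiness gate that CreateSync relies on: it succeeds only once status.BandwidthLimitRules mirrors spec.BandwidthLimitRules. A hypothetical caller, sketched under the assumption that an empty rule list is acceptable (the "EIP" binding-type string is an assumption, not a constant taken from the API):

    qosClient := f.QoSPolicyClient()
    qos := MakeQoSPolicy("qos-"+RandomSuffix(), false, apiv1.QoSPolicyBindingType("EIP"), nil)
    // CreateSync returns only after WaitToQoSReady sees spec and status agree.
    qos = qosClient.CreateSync(qos)
    defer qosClient.DeleteSync(qos.Name)

-// WaitToQoSReady returns whether the qos is ready within timeout.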
-func (c *QoSPolicyClient) WaitToQoSReady(name string) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - qos := c.Get(name) - if len(qos.Spec.BandwidthLimitRules) != len(qos.Status.BandwidthLimitRules) { - Logf("qos %s is not ready", name) - continue - } - sort.Slice(qos.Spec.BandwidthLimitRules, func(i, j int) bool { - return qos.Spec.BandwidthLimitRules[i].Name < qos.Spec.BandwidthLimitRules[j].Name - }) - sort.Slice(qos.Status.BandwidthLimitRules, func(i, j int) bool { - return qos.Status.BandwidthLimitRules[i].Name < qos.Status.BandwidthLimitRules[j].Name - }) - equalCount := 0 - for index, specRule := range qos.Spec.BandwidthLimitRules { - statusRule := qos.Status.BandwidthLimitRules[index] - if reflect.DeepEqual(specRule, statusRule) { - equalCount++ - } - } - - if equalCount == len(qos.Spec.BandwidthLimitRules) { - Logf("qos %s is ready", name) - return true - } - Logf("qos %s is not ready", name) - } - return false -} - -func MakeQoSPolicy(name string, shared bool, qosType apiv1.QoSPolicyBindingType, rules apiv1.QoSPolicyBandwidthLimitRules) *apiv1.QoSPolicy { - qosPolicy := &apiv1.QoSPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.QoSPolicySpec{ - BandwidthLimitRules: rules, - Shared: shared, - BindingType: qosType, - }, - } - return qosPolicy -} diff --git a/test/e2e/framework/security-group.go b/test/e2e/framework/security-group.go deleted file mode 100644 index 786492dcef4..00000000000 --- a/test/e2e/framework/security-group.go +++ /dev/null @@ -1,147 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// SecurityGroupClient is a struct for security-group client. -type SecurityGroupClient struct { - f *Framework - v1.SecurityGroupInterface -} - -func (f *Framework) SecurityGroupClient() *SecurityGroupClient { - return &SecurityGroupClient{ - f: f, - SecurityGroupInterface: f.KubeOVNClientSet.KubeovnV1().SecurityGroups(), - } -} - -func (c *SecurityGroupClient) Get(name string) *apiv1.SecurityGroup { - ginkgo.GinkgoHelper() - sg, err := c.SecurityGroupInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return sg.DeepCopy() -} - -// Create creates a new security group according to the framework specifications -func (c *SecurityGroupClient) Create(sg *apiv1.SecurityGroup) *apiv1.SecurityGroup { - ginkgo.GinkgoHelper() - sg, err := c.SecurityGroupInterface.Create(context.TODO(), sg, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating security group") - return sg.DeepCopy() -} - -// CreateSync creates a new security group according to the framework specifications, and waits for it to be ready. -func (c *SecurityGroupClient) CreateSync(sg *apiv1.SecurityGroup) *apiv1.SecurityGroup { - ginkgo.GinkgoHelper() - - sg = c.Create(sg) - ExpectTrue(c.WaitToBeReady(sg.Name, timeout)) - // Get the newest ovn security group after it becomes ready - return c.Get(sg.Name).DeepCopy() -} - -// WaitToBeReady returns whether the security group is ready within timeout. 
-func (c *SecurityGroupClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for security group %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.PortGroup != "" { - Logf("security group %s is ready", name) - return true - } - Logf("security group %s is not ready", name) - } - Logf("security group %s was not ready within %v", name, timeout) - return false -} - -// Patch patches the security group -func (c *SecurityGroupClient) Patch(original, modified *apiv1.SecurityGroup, timeout time.Duration) *apiv1.SecurityGroup { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedSg *apiv1.SecurityGroup - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - p, err := c.SecurityGroupInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch security group %q", original.Name) - } - patchedSg = p - return true, nil - }) - if err == nil { - return patchedSg.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch security group %s", original.Name) - } - Failf("error occurred while retrying to patch security group %s: %v", original.Name, err) - - return nil -} - -// Delete deletes a security group if the security group exists -func (c *SecurityGroupClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.SecurityGroupInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete security group %q: %v", name, err) - } -} - -// DeleteSync deletes the security group and waits for the security group to disappear for `timeout`. -// If the security group doesn't disappear before the timeout, it will fail the test. -func (c *SecurityGroupClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for security group %q to disappear", name) -} - -// WaitToDisappear waits the given timeout duration for the specified Security Group to disappear. 
-func (c *SecurityGroupClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.SecurityGroup, error) { - sg, err := c.SecurityGroupInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return sg, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected security group %s to not be found: %w", name, err) - } - return nil -} - -func MakeSecurityGroup(name string, allowSameGroupTraffic bool, ingressRules, egressRules []*apiv1.SgRule) *apiv1.SecurityGroup { - sg := &apiv1.SecurityGroup{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.SecurityGroupSpec{ - AllowSameGroupTraffic: allowSameGroupTraffic, - IngressRules: ingressRules, - EgressRules: egressRules, - }, - } - return sg -} diff --git a/test/e2e/framework/service.go b/test/e2e/framework/service.go deleted file mode 100644 index b0ade5ee136..00000000000 --- a/test/e2e/framework/service.go +++ /dev/null @@ -1,177 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/utils/ptr" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// ServiceClient is a struct for service client. -type ServiceClient struct { - f *Framework - v1core.ServiceInterface - namespace string -} - -func (f *Framework) ServiceClient() *ServiceClient { - return f.ServiceClientNS(f.Namespace.Name) -} - -func (f *Framework) ServiceClientNS(namespace string) *ServiceClient { - return &ServiceClient{ - f: f, - ServiceInterface: f.ClientSet.CoreV1().Services(namespace), - namespace: namespace, - } -} - -func (c *ServiceClient) Get(name string) *corev1.Service { - ginkgo.GinkgoHelper() - service, err := c.ServiceInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return service -} - -// Create creates a new service according to the framework specifications -func (c *ServiceClient) Create(service *corev1.Service) *corev1.Service { - ginkgo.GinkgoHelper() - s, err := c.ServiceInterface.Create(context.TODO(), service, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating service") - return s.DeepCopy() -} - -// CreateSync creates a new service according to the framework specifications, and waits for it to be updated. 
-func (c *ServiceClient) CreateSync(service *corev1.Service, cond func(s *corev1.Service) (bool, error), condDesc string) *corev1.Service { - ginkgo.GinkgoHelper() - _ = c.Create(service) - return c.WaitUntil(service.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Patch patches the service -func (c *ServiceClient) Patch(original, modified *corev1.Service) *corev1.Service { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedService *corev1.Service - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.ServiceInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch service %q", original.Name) - } - patchedService = s - return true, nil - }) - if err == nil { - return patchedService.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch service %s", original.Name) - } - Failf("error occurred while retrying to patch service %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the service and waits the service to meet the condition -func (c *ServiceClient) PatchSync(original, modified *corev1.Service, cond func(s *corev1.Service) (bool, error), condDesc string) *corev1.Service { - ginkgo.GinkgoHelper() - _ = c.Patch(original, modified) - return c.WaitUntil(original.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Delete deletes a service if the service exists -func (c *ServiceClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.ServiceInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete service %q: %v", name, err) - } -} - -// DeleteSync deletes the service and waits for the service to disappear for `timeout`. -// If the service doesn't disappear before the timeout, it will fail the test. -func (c *ServiceClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for service %q to disappear", name) -} - -// WaitUntil waits the given timeout duration for the specified condition to be met. -func (c *ServiceClient) WaitUntil(name string, cond func(s *corev1.Service) (bool, error), condDesc string, interval, timeout time.Duration) *corev1.Service { - var service *corev1.Service - err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, false, func(_ context.Context) (bool, error) { - Logf("Waiting for service %s to meet condition %q", name, condDesc) - service = c.Get(name).DeepCopy() - met, err := cond(service) - if err != nil { - return false, fmt.Errorf("failed to check condition for service %s: %w", name, err) - } - if met { - Logf("service %s met condition %q", name, condDesc) - } else { - Logf("service %s not met condition %q", name, condDesc) - } - return met, nil - }) - if err == nil { - return service - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while waiting for service %s to meet condition %q", name, condDesc) - } - Failf("error occurred while waiting for service %s to meet condition %q: %v", name, condDesc, err) - - return nil -} - -// WaitToDisappear waits the given timeout duration for the specified service to disappear. 
-func (c *ServiceClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*corev1.Service, error) { - svc, err := c.ServiceInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return svc, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected service %s to not be found: %w", name, err) - } - return nil -} - -func MakeService(name string, svcType corev1.ServiceType, annotations, selector map[string]string, ports []corev1.ServicePort, affinity corev1.ServiceAffinity) *corev1.Service { - service := &corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Annotations: annotations, - }, - Spec: corev1.ServiceSpec{ - IPFamilyPolicy: ptr.To(corev1.IPFamilyPolicyPreferDualStack), - Ports: ports, - Selector: selector, - SessionAffinity: affinity, - Type: svcType, - }, - } - - return service -} diff --git a/test/e2e/framework/statefulset.go b/test/e2e/framework/statefulset.go deleted file mode 100644 index f63c11f584e..00000000000 --- a/test/e2e/framework/statefulset.go +++ /dev/null @@ -1,180 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - v1apps "k8s.io/client-go/kubernetes/typed/apps/v1" - "k8s.io/kubectl/pkg/polymorphichelpers" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/statefulset" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - "github.com/kubeovn/kube-ovn/pkg/util" -) - -type StatefulSetClient struct { - f *Framework - v1apps.StatefulSetInterface - namespace string -} - -func (f *Framework) StatefulSetClient() *StatefulSetClient { - return f.StatefulSetClientNS(f.Namespace.Name) -} - -func (f *Framework) StatefulSetClientNS(namespace string) *StatefulSetClient { - return &StatefulSetClient{ - f: f, - StatefulSetInterface: f.ClientSet.AppsV1().StatefulSets(namespace), - namespace: namespace, - } -} - -func (c *StatefulSetClient) Get(name string) *appsv1.StatefulSet { - ginkgo.GinkgoHelper() - sts, err := c.StatefulSetInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return sts -} - -func (c *StatefulSetClient) GetPods(sts *appsv1.StatefulSet) *corev1.PodList { - ginkgo.GinkgoHelper() - pods := statefulset.GetPodList(context.Background(), c.f.ClientSet, sts) - statefulset.SortStatefulPods(pods) - return pods -} - -// Create creates a new statefulset according to the framework specifications -func (c *StatefulSetClient) Create(sts *appsv1.StatefulSet) *appsv1.StatefulSet { - ginkgo.GinkgoHelper() - s, err := c.StatefulSetInterface.Create(context.TODO(), sts, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating statefulset") - return s.DeepCopy() -} - -// CreateSync creates a new statefulset according to the framework specifications, and waits for it to complete. 
-func (c *StatefulSetClient) CreateSync(sts *appsv1.StatefulSet) *appsv1.StatefulSet { - ginkgo.GinkgoHelper() - - s := c.Create(sts) - c.WaitForRunningAndReady(s) - // Get the newest statefulset - return c.Get(s.Name).DeepCopy() -} - -// Delete deletes a statefulset if the statefulset exists -func (c *StatefulSetClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.StatefulSetInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete statefulset %q: %v", name, err) - } -} - -// DeleteSync deletes the statefulset and waits for the statefulset to disappear for `timeout`. -// If the statefulset doesn't disappear before the timeout, it will fail the test. -func (c *StatefulSetClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for statefulset %q to disappear", name) -} - -func (c *StatefulSetClient) WaitForRunningAndReady(sts *appsv1.StatefulSet) { - ginkgo.GinkgoHelper() - Logf("Waiting up to %v for statefulset %s to be running and ready", timeout, sts.Name) - statefulset.WaitForRunningAndReady(context.Background(), c.f.ClientSet, *sts.Spec.Replicas, sts) -} - -// WaitToDisappear waits the given timeout duration for the specified statefulset to disappear. -func (c *StatefulSetClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*appsv1.StatefulSet, error) { - sts, err := c.StatefulSetInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return sts, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected statefulset %s to not be found: %w", name, err) - } - return nil -} - -func (c *StatefulSetClient) PatchSync(original, modified *appsv1.StatefulSet) *appsv1.StatefulSet { - ginkgo.GinkgoHelper() - sts := c.Patch(original, modified) - return c.RolloutStatus(sts.Name) -} - -func (c *StatefulSetClient) Patch(original, modified *appsv1.StatefulSet) *appsv1.StatefulSet { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedSts *appsv1.StatefulSet - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - sts, err := c.StatefulSetInterface.Patch(ctx, original.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch StatefulSet %s/%s", original.Namespace, original.Name) - } - patchedSts = sts - return true, nil - }) - if err == nil { - return patchedSts.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch StatefulSet %s/%s", original.Namespace, original.Name) - } - Failf("error occurred while retrying to patch StatefulSet %s/%s: %v", original.Namespace, original.Name, err) - - return nil -} - -func (c *StatefulSetClient) RolloutStatus(name string) *appsv1.StatefulSet { - var sts *appsv1.StatefulSet - WaitUntil(2*time.Second, timeout, func(_ context.Context) (bool, error) { - var err error - sts = c.Get(name) - unstructured := &unstructured.Unstructured{} - if unstructured.Object, err = runtime.DefaultUnstructuredConverter.ToUnstructured(sts); err != nil { - return false, err - } - - viewer := 
&polymorphichelpers.StatefulSetStatusViewer{} - msg, done, err := viewer.Status(unstructured, 0) - if err != nil { - return false, err - } - if done { - return true, nil - } - - Logf(strings.TrimSpace(msg)) - return false, nil - }, "") - - return sts -} - -func MakeStatefulSet(name, svcName string, replicas int32, labels map[string]string, image string) *appsv1.StatefulSet { - sts := statefulset.NewStatefulSet(name, "", svcName, replicas, nil, nil, labels) - sts.Spec.Template.Spec.Containers[0].Image = image - return sts -} diff --git a/test/e2e/framework/subnet.go b/test/e2e/framework/subnet.go deleted file mode 100644 index 8c52202f944..00000000000 --- a/test/e2e/framework/subnet.go +++ /dev/null @@ -1,288 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// SubnetClient is a struct for subnet client. -type SubnetClient struct { - f *Framework - v1.SubnetInterface -} - -func (f *Framework) SubnetClient() *SubnetClient { - return &SubnetClient{ - f: f, - SubnetInterface: f.KubeOVNClientSet.KubeovnV1().Subnets(), - } -} - -func (c *SubnetClient) Get(name string) *apiv1.Subnet { - ginkgo.GinkgoHelper() - subnet, err := c.SubnetInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return subnet -} - -// Create creates a new subnet according to the framework specifications -func (c *SubnetClient) Create(subnet *apiv1.Subnet) *apiv1.Subnet { - ginkgo.GinkgoHelper() - s, err := c.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating subnet") - return s.DeepCopy() -} - -// CreateSync creates a new subnet according to the framework specifications, and waits for it to be ready. -func (c *SubnetClient) CreateSync(subnet *apiv1.Subnet) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - s := c.Create(subnet) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest subnet after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Update updates the subnet -func (c *SubnetClient) Update(subnet *apiv1.Subnet, options metav1.UpdateOptions, timeout time.Duration) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - var updatedSubnet *apiv1.Subnet - err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.SubnetInterface.Update(ctx, subnet, options) - if err != nil { - return handleWaitingAPIError(err, false, "update subnet %q", subnet.Name) - } - updatedSubnet = s - return true, nil - }) - if err == nil { - return updatedSubnet.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to update subnet %s", subnet.Name) - } - Failf("error occurred while retrying to update subnet %s: %v", subnet.Name, err) - - return nil -} - -// UpdateSync updates the subnet and waits for the subnet to be ready for `timeout`. -// If the subnet doesn't become ready before the timeout, it will fail the test. 
-func (c *SubnetClient) UpdateSync(subnet *apiv1.Subnet, options metav1.UpdateOptions, timeout time.Duration) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - s := c.Update(subnet, options, timeout) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest subnet after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Patch patches the subnet -func (c *SubnetClient) Patch(original, modified *apiv1.Subnet, timeout time.Duration) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedSubnet *apiv1.Subnet - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - s, err := c.SubnetInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch subnet %q", original.Name) - } - patchedSubnet = s - return true, nil - }) - if err == nil { - return patchedSubnet.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch subnet %s", original.Name) - } - Failf("error occurred while retrying to patch subnet %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the subnet and waits for the subnet to be ready for `timeout`. -// If the subnet doesn't become ready before the timeout, it will fail the test. -func (c *SubnetClient) PatchSync(original, modified *apiv1.Subnet) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - s := c.Patch(original, modified, timeout) - ExpectTrue(c.WaitToBeUpdated(s, timeout)) - ExpectTrue(c.WaitToBeReady(s.Name, timeout)) - // Get the newest subnet after it becomes ready - return c.Get(s.Name).DeepCopy() -} - -// Delete deletes a subnet if the subnet exists -func (c *SubnetClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.SubnetInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete subnet %q: %v", name, err) - } -} - -// DeleteSync deletes the subnet and waits for the subnet to disappear for `timeout`. -// If the subnet doesn't disappear before the timeout, it will fail the test. -func (c *SubnetClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for subnet %q to disappear", name) -} - -func isSubnetConditionSetAsExpected(subnet *apiv1.Subnet, conditionType apiv1.ConditionType, wantTrue, silent bool) bool { - for _, cond := range subnet.Status.Conditions { - if cond.Type == conditionType { - if (wantTrue && (cond.Status == corev1.ConditionTrue)) || (!wantTrue && (cond.Status != corev1.ConditionTrue)) { - return true - } - if !silent { - Logf("Condition %s of subnet %s is %v instead of %t. Reason: %v, message: %v", - conditionType, subnet.Name, cond.Status == corev1.ConditionTrue, wantTrue, cond.Reason, cond.Message) - } - return false - } - } - if !silent { - Logf("Couldn't find condition %v on subnet %v", conditionType, subnet.Name) - } - return false -} - -// IsSubnetConditionSetAsExpected returns a wantTrue value if the subnet has a match to the conditionType, -// otherwise returns an opposite value of the wantTrue with detailed logging. 
-func IsSubnetConditionSetAsExpected(subnet *apiv1.Subnet, conditionType apiv1.ConditionType, wantTrue bool) bool { - return isSubnetConditionSetAsExpected(subnet, conditionType, wantTrue, false) -} - -// WaitConditionToBe returns whether subnet "name's" condition state matches wantTrue -// within timeout. If wantTrue is true, it will ensure the subnet condition status is -// ConditionTrue; if it's false, it ensures the subnet condition is in any state other -// than ConditionTrue (e.g. not true or unknown). -func (c *SubnetClient) WaitConditionToBe(name string, conditionType apiv1.ConditionType, wantTrue bool, timeout time.Duration) bool { - Logf("Waiting up to %v for subnet %s condition %s to be %t", timeout, name, conditionType, wantTrue) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - subnet := c.Get(name) - if IsSubnetConditionSetAsExpected(subnet, conditionType, wantTrue) { - Logf("Subnet %s reach desired %t condition status", name, wantTrue) - return true - } - Logf("Subnet %s still not reach desired %t condition status", name, wantTrue) - } - Logf("Subnet %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout) - return false -} - -// WaitToBeReady returns whether the subnet is ready within timeout. -func (c *SubnetClient) WaitToBeReady(name string, timeout time.Duration) bool { - return c.WaitConditionToBe(name, apiv1.Ready, true, timeout) -} - -// WaitToBeUpdated returns whether the subnet is updated within timeout. -func (c *SubnetClient) WaitToBeUpdated(subnet *apiv1.Subnet, timeout time.Duration) bool { - Logf("Waiting up to %v for subnet %s to be updated", timeout, subnet.Name) - rv, _ := big.NewInt(0).SetString(subnet.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(subnet.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - Logf("Subnet %s updated", subnet.Name) - return true - } - Logf("Subnet %s still not updated", subnet.Name) - } - Logf("Subnet %s was not updated within %v", subnet.Name, timeout) - return false -} - -// WaitUntil waits the given timeout duration for the specified condition to be met. -func (c *SubnetClient) WaitUntil(name string, cond func(s *apiv1.Subnet) (bool, error), condDesc string, interval, timeout time.Duration) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - var subnet *apiv1.Subnet - err := wait.PollUntilContextTimeout(context.Background(), interval, timeout, true, func(_ context.Context) (bool, error) { - Logf("Waiting for subnet %s to meet condition %q", name, condDesc) - subnet = c.Get(name).DeepCopy() - met, err := cond(subnet) - if err != nil { - return false, fmt.Errorf("failed to check condition for subnet %s: %w", name, err) - } - return met, nil - }) - if err == nil { - return subnet - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while waiting for subnet %s to meet condition %q", name, condDesc) - } - Failf("error occurred while waiting for subnet %s to meet condition %q: %v", name, condDesc, err) - - return nil -} - -// WaitToDisappear waits the given timeout duration for the specified subnet to disappear. 
-func (c *SubnetClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.Subnet, error) { - subnet, err := c.SubnetInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return subnet, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected subnet %s to not be found: %w", name, err) - } - return nil -} - -func MakeSubnet(name, vlan, cidr, gateway, vpc, provider string, excludeIPs, gatewayNodes, namespaces []string) *apiv1.Subnet { - subnet := &apiv1.Subnet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.SubnetSpec{ - Vpc: vpc, - Vlan: vlan, - CIDRBlock: cidr, - Gateway: gateway, - Protocol: util.CheckProtocol(cidr), - Provider: provider, - ExcludeIps: excludeIPs, - GatewayNode: strings.Join(gatewayNodes, ","), - Namespaces: namespaces, - }, - } - if util.IsOvnProvider(provider) { - if len(gatewayNodes) != 0 { - subnet.Spec.GatewayType = apiv1.GWCentralizedType - } else { - subnet.Spec.GatewayType = apiv1.GWDistributedType - } - } - return subnet -} diff --git a/test/e2e/framework/switch-lb-rule.go b/test/e2e/framework/switch-lb-rule.go deleted file mode 100644 index 699172a7356..00000000000 --- a/test/e2e/framework/switch-lb-rule.go +++ /dev/null @@ -1,179 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// SwitchLBRuleClient is a struct for switch-lb-rule client. -type SwitchLBRuleClient struct { - f *Framework - v1.SwitchLBRuleInterface - namespace string -} - -func (f *Framework) SwitchLBRuleClient() *SwitchLBRuleClient { - return f.SwitchLBRuleClientNS(f.Namespace.Name) -} - -func (f *Framework) SwitchLBRuleClientNS(namespace string) *SwitchLBRuleClient { - return &SwitchLBRuleClient{ - f: f, - SwitchLBRuleInterface: f.KubeOVNClientSet.KubeovnV1().SwitchLBRules(), - namespace: namespace, - } -} - -func (c *SwitchLBRuleClient) Get(name string) *apiv1.SwitchLBRule { - ginkgo.GinkgoHelper() - rules, err := c.SwitchLBRuleInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return rules -} - -// Create creates a new switch-lb-rule according to the framework specifications -func (c *SwitchLBRuleClient) Create(rule *apiv1.SwitchLBRule) *apiv1.SwitchLBRule { - ginkgo.GinkgoHelper() - e, err := c.SwitchLBRuleInterface.Create(context.TODO(), rule, metav1.CreateOptions{}) - ExpectNoError(err, "error creating switch-lb-rule") - return e.DeepCopy() -} - -// CreateSync creates a new switch-lb-rule according to the framework specifications, and waits for it to be updated. 
-func (c *SwitchLBRuleClient) CreateSync(rule *apiv1.SwitchLBRule, cond func(s *apiv1.SwitchLBRule) (bool, error), condDesc string) *apiv1.SwitchLBRule { - ginkgo.GinkgoHelper() - _ = c.Create(rule) - return c.WaitUntil(rule.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Patch patches the switch-lb-rule -func (c *SwitchLBRuleClient) Patch(original, modified *apiv1.SwitchLBRule) *apiv1.SwitchLBRule { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedService *apiv1.SwitchLBRule - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(_ context.Context) (bool, error) { - s, err := c.SwitchLBRuleInterface.Patch(context.TODO(), original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch switch-lb-rule %q", original.Name) - } - patchedService = s - return true, nil - }) - if err == nil { - return patchedService.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch switch-lb-rule %s", original.Name) - } - Failf("error occurred while retrying to patch switch-lb-rule %s: %v", original.Name, err) - - return nil -} - -// PatchSync patches the switch-lb-rule and waits the switch-lb-rule to meet the condition -func (c *SwitchLBRuleClient) PatchSync(original, modified *apiv1.SwitchLBRule, cond func(s *apiv1.SwitchLBRule) (bool, error), condDesc string) *apiv1.SwitchLBRule { - ginkgo.GinkgoHelper() - _ = c.Patch(original, modified) - return c.WaitUntil(original.Name, cond, condDesc, 2*time.Second, timeout) -} - -// Delete deletes a switch-lb-rule if the switch-lb-rule exists -func (c *SwitchLBRuleClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.SwitchLBRuleInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete switch-lb-rule %q: %v", name, err) - } -} - -// DeleteSync deletes the switch-lb-rule and waits for the switch-lb-rule to disappear for `timeout`. -// If the switch-lb-rule doesn't disappear before the timeout, it will fail the test. -func (c *SwitchLBRuleClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for switch-lb-rule %q to disappear", name) -} - -// WaitUntil waits the given timeout duration for the specified condition to be met. 
-func (c *SwitchLBRuleClient) WaitUntil(name string, cond func(s *apiv1.SwitchLBRule) (bool, error), condDesc string, _, timeout time.Duration) *apiv1.SwitchLBRule {
-	ginkgo.GinkgoHelper()
-
-	var rules *apiv1.SwitchLBRule
-	err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(_ context.Context) (bool, error) {
-		Logf("Waiting for switch-lb-rule %s to meet condition %q", name, condDesc)
-		rules = c.Get(name).DeepCopy()
-		met, err := cond(rules)
-		if err != nil {
-			return false, fmt.Errorf("failed to check condition for switch-lb-rule %s: %w", name, err)
-		}
-		if met {
-			Logf("switch-lb-rule %s met condition %q", name, condDesc)
-		} else {
-			Logf("switch-lb-rule %s not met condition %q", name, condDesc)
-		}
-		return met, nil
-	})
-	if err == nil {
-		return rules
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		Failf("timed out while waiting for switch-lb-rule %s to meet condition %q", name, condDesc)
-	}
-	Failf("error occurred while waiting for switch-lb-rule %s to meet condition %q: %v", name, condDesc, err)
-
-	return nil
-}
-
-// WaitToDisappear waits the given timeout duration for the specified switch-lb-rule to disappear.
-func (c *SwitchLBRuleClient) WaitToDisappear(name string, _, timeout time.Duration) error {
-	err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.SwitchLBRule, error) {
-		svc, err := c.SwitchLBRuleInterface.Get(ctx, name, metav1.GetOptions{})
-		if apierrors.IsNotFound(err) {
-			return nil, nil
-		}
-		return svc, err
-	})).WithTimeout(timeout).Should(gomega.BeNil())
-	if err != nil {
-		return fmt.Errorf("expected switch-lb-rule %s to not be found: %w", name, err)
-	}
-	return nil
-}
-
-func MakeSwitchLBRule(name, namespace, vip string, sessionAffinity corev1.ServiceAffinity, annotations map[string]string, selector, endpoints []string, ports []apiv1.SlrPort) *apiv1.SwitchLBRule {
-	return &apiv1.SwitchLBRule{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        name,
-			Namespace:   namespace,
-			Annotations: annotations,
-		},
-		Spec: apiv1.SwitchLBRuleSpec{
-			Vip:             vip,
-			Namespace:       namespace,
-			Selector:        selector,
-			Endpoints:       endpoints,
-			SessionAffinity: string(sessionAffinity),
-			Ports:           ports,
-		},
-	}
-}
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
deleted file mode 100644
index 27c13656417..00000000000
--- a/test/e2e/framework/util.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package framework
-
-import (
-	"fmt"
-	"math/rand/v2"
-	"net"
-	"sort"
-	"strings"
-
-	"github.com/scylladb/go-set/strset"
-
-	"github.com/onsi/ginkgo/v2"
-
-	"github.com/kubeovn/kube-ovn/pkg/ipam"
-	"github.com/kubeovn/kube-ovn/pkg/util"
-)
-
-const (
-	KubeOvnNamespace = "kube-system"
-	DaemonSetOvsOvn  = "ovs-ovn"
-)
-
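The util.go helpers below exist to keep parallel Ginkgo workers from colliding: generated names and CIDRs embed the parallel process number. A hypothetical sketch of how tests combined them with the MakeSubnet helper from subnet.go (all concrete values are illustrative):

    name := "subnet-" + RandomSuffix()
    cidr := RandomCIDR(IPv4)                // e.g. "10.241.55.0/24" for Ginkgo process 1
    excludeIPs := RandomExcludeIPs(cidr, 3) // mixes single IPs and "start..end" ranges
    subnet := MakeSubnet(name, "", cidr, "", "", "", excludeIPs, nil, nil)
    _ = f.SubnetClient().CreateSync(subnet)

-// RandomSuffix provides a random sequence to append to resources.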
-func RandomSuffix() string { - return fmt.Sprintf("%d%04d%04d", ginkgo.GinkgoParallelProcess(), rand.IntN(10000), rand.IntN(10000)) -} - -func RandomCIDR(family string) string { - fnIPv4 := func() string { - cidr := net.IPNet{ - IP: net.ParseIP("10.0.0.0").To4(), - Mask: net.CIDRMask(24, 32), - } - cidr.IP[1] = 0xf0 | byte(ginkgo.GinkgoParallelProcess()) - cidr.IP[2] = byte(rand.IntN(0xff + 1)) - return cidr.String() - } - - fnIPv6 := func() string { - cidr := net.IPNet{ - IP: net.ParseIP("fc00:10:ff::").To16(), - Mask: net.CIDRMask(96, 128), - } - cidr.IP[9] = byte(ginkgo.GinkgoParallelProcess()) - cidr.IP[10] = byte(rand.IntN(0xff + 1)) - cidr.IP[11] = byte(rand.IntN(0xff + 1)) - return cidr.String() - } - - switch family { - case IPv4: - return fnIPv4() - case IPv6: - return fnIPv6() - case Dual: - return fnIPv4() + "," + fnIPv6() - default: - Failf("invalid ip family: %q", family) - return "" - } -} - -func sortIPs(ips []string) { - sort.Slice(ips, func(i, j int) bool { - x, err := ipam.NewIP(ips[i]) - ExpectNoError(err) - y, err := ipam.NewIP(ips[j]) - ExpectNoError(err) - return x.LessThan(y) - }) -} - -// ipv4/ipv6 only -func RandomExcludeIPs(cidr string, count int) []string { - if cidr == "" || count == 0 { - return nil - } - - ExpectNotContainSubstring(cidr, ",") - ExpectNotContainSubstring(cidr, ";") - - rangeCount := rand.IntN(count + 1) - ips := randomSortedIPs(cidr, rangeCount*2+count-rangeCount, true) - - var idx int - rangeLeft := rangeCount - ret := make([]string, 0, count) - for i := 0; i < count; i++ { - if rangeLeft != 0 && rand.IntN(count-i) < rangeLeft { - ret = append(ret, fmt.Sprintf("%s..%s", ips[idx], ips[idx+1])) - idx++ - rangeLeft-- - } else { - ret = append(ret, ips[idx]) - } - idx++ - } - - return ret -} - -// ipv4/ipv6 only -func randomSortedIPs(cidr string, count int, sort bool) []string { - if cidr == "" { - return nil - } - - _, ipNet, err := net.ParseCIDR(cidr) - ExpectNoError(err) - - set := strset.NewWithSize(count) - r := ipam.NewIPRangeFromCIDR(*ipNet) - r = ipam.NewIPRange(r.Start().Add(2), r.End().Sub(1)) - for set.Size() != count { - set.Add(r.Random().String()) - } - - ips := set.List() - if sort { - sortIPs(ips) - } - - return ips -} - -func RandomIPs(cidr, sep string, count int) string { - cidrV4, cidrV6 := util.SplitStringIP(cidr) - ipsV4 := randomSortedIPs(cidrV4, count, false) - ipsV6 := randomSortedIPs(cidrV6, count, false) - - dual := make([]string, 0, count) - for i := 0; i < count; i++ { - ips := make([]string, 0, 2) - if i < len(ipsV4) { - ips = append(ips, ipsV4[i]) - } - if i < len(ipsV6) { - ips = append(ips, ipsV6[i]) - } - dual = append(dual, strings.Join(ips, ",")) - } - - return strings.Join(dual, sep) -} - -// ipv4/ipv6 only -func randomPool(cidr string, count int) []string { - if cidr == "" || count == 0 { - return nil - } - - _, ipNet, err := net.ParseCIDR(cidr) - ExpectNoError(err) - - r := ipam.NewIPRangeFromCIDR(*ipNet) - r = ipam.NewIPRange(r.Start().Add(2), r.End().Sub(1)) - - ones, bits := ipNet.Mask.Size() - rl := ipam.NewEmptyIPRangeList() - set := strset.NewWithSize(count) - for set.Size() != count/4 { - prefix := (ones+bits)/2 + rand.IntN((bits-ones)/2+1) - _, ipNet, err = net.ParseCIDR(fmt.Sprintf("%s/%d", r.Random(), prefix)) - ExpectNoError(err) - - v := ipam.NewIPRangeFromCIDR(*ipNet) - start, end := v.Start(), v.End() - if !r.Contains(start) || !r.Contains(end) || rl.Contains(start) || rl.Contains(end) { - continue - } - - rl = rl.MergeRange(ipam.NewIPRange(start, end)) - set.Add(ipNet.String()) - } - - count -= 
set.Size()
-	m := count / 3 // number of "start..end" range entries to emit
-	n := count - m // number of single-IP entries to emit
-	ips := make([]ipam.IP, 0, m*2+n)
-	ipSet := strset.NewWithSize(cap(ips))
-	for len(ips) != cap(ips) {
-		ip := r.Random()
-		if rl.Contains(ip) {
-			continue
-		}
-
-		s := ip.String()
-		if ipSet.Has(s) {
-			continue
-		}
-		ips = append(ips, ip)
-		ipSet.Add(s)
-	}
-	sort.Slice(ips, func(i, j int) bool { return ips[i].LessThan(ips[j]) })
-
-	var i, j int
-	k := rand.IntN(len(ips))
-	for i != m || j != n {
-		if i != m {
-			x, y := k%len(ips), (k+1)%len(ips)
-			n1, _ := rl.Find(ips[x])
-			n2, _ := rl.Find(ips[y])
-			if n1 == n2 && ips[x].LessThan(ips[y]) {
-				set.Add(fmt.Sprintf("%s..%s", ips[x].String(), ips[y].String()))
-				i, k = i+1, k+2
-				continue
-			}
-		}
-
-		if j != n {
-			set.Add(ips[k%len(ips)].String())
-			j, k = j+1, k+1
-		}
-	}
-
-	return set.List()
-}
-
-func RandomIPPool(cidr string, count int) []string {
-	cidrV4, cidrV6 := util.SplitStringIP(cidr)
-	ipsV4, ipsV6 := randomPool(cidrV4, count), randomPool(cidrV6, count)
-	set := strset.NewWithSize(len(cidrV4) + len(cidrV6))
-	set.Add(ipsV4...)
-	set.Add(ipsV6...)
-	return set.List()
-}
-
-func PrevIP(ip string) string {
-	v, err := ipam.NewIP(ip)
-	ExpectNoError(err)
-	return v.Sub(1).String()
-}
-
-func NextIP(ip string) string {
-	v, err := ipam.NewIP(ip)
-	ExpectNoError(err)
-	return v.Add(1).String()
-}
diff --git a/test/e2e/framework/vip.go b/test/e2e/framework/vip.go
deleted file mode 100644
index 3ebc017dd9f..00000000000
--- a/test/e2e/framework/vip.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package framework
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"time"
-
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/kubernetes/test/e2e/framework"
-
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-
-	apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1"
-	v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1"
-	"github.com/kubeovn/kube-ovn/pkg/util"
-)
-
-// VipClient is a struct for vip client.
-type VipClient struct {
-	f *Framework
-	v1.VipInterface
-}
-
-func (f *Framework) VipClient() *VipClient {
-	return &VipClient{
-		f:            f,
-		VipInterface: f.KubeOVNClientSet.KubeovnV1().Vips(),
-	}
-}
-
-func (c *VipClient) Get(name string) *apiv1.Vip {
-	ginkgo.GinkgoHelper()
-	vip, err := c.VipInterface.Get(context.TODO(), name, metav1.GetOptions{})
-	ExpectNoError(err)
-	return vip.DeepCopy()
-}
-
-// Create creates a new vip according to the framework specifications
-func (c *VipClient) Create(vip *apiv1.Vip) *apiv1.Vip {
-	ginkgo.GinkgoHelper()
-	vip, err := c.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{})
-	ExpectNoError(err, "Error creating vip")
-	return vip.DeepCopy()
-}
-
-// CreateSync creates a new ovn vip according to the framework specifications, and waits for it to be ready.
-func (c *VipClient) CreateSync(vip *apiv1.Vip) *apiv1.Vip {
-	ginkgo.GinkgoHelper()
-
-	vip = c.Create(vip)
-	ExpectTrue(c.WaitToBeReady(vip.Name, timeout))
-	// Get the newest ovn vip after it becomes ready
-	return c.Get(vip.Name).DeepCopy()
-}
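PrevIP and NextIP above do plain address arithmetic, and the vip client follows the same Create/CreateSync shape as the rest of the framework. A short illustrative sketch (the subnet name and addresses are assumptions; MakeVip is defined later in this file):

    _ = NextIP("10.16.0.1") // "10.16.0.2"
    _ = PrevIP("10.16.0.1") // "10.16.0.0"

    vipClient := f.VipClient()
    vip := MakeVip(f.Namespace.Name, "vip-"+RandomSuffix(), "ovn-default", "", "", "")
    vip = vipClient.CreateSync(vip) // returns once status.Ready is true

-// WaitToBeReady returns whether the ovn vip is ready within timeout.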
-func (c *VipClient) WaitToBeReady(name string, timeout time.Duration) bool { - Logf("Waiting up to %v for ovn vip %s to be ready", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Status.Ready { - Logf("ovn vip %s is ready", name) - return true - } - Logf("ovn vip %s is not ready", name) - } - Logf("ovn vip %s was not ready within %v", name, timeout) - return false -} - -// Patch patches the vip -func (c *VipClient) Patch(original, modified *apiv1.Vip, timeout time.Duration) *apiv1.Vip { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedVip *apiv1.Vip - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - p, err := c.VipInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch vip %q", original.Name) - } - patchedVip = p - return true, nil - }) - if err == nil { - return patchedVip.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch vip %s", original.Name) - } - Failf("error occurred while retrying to patch vip %s: %v", original.Name, err) - - return nil -} - -// Delete deletes a vip if the vip exists -func (c *VipClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.VipInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete vip %q: %v", name, err) - } -} - -// DeleteSync deletes the ovn vip and waits for the ovn vip to disappear for `timeout`. -// If the ovn vip doesn't disappear before the timeout, it will fail the test. -func (c *VipClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for ovn vip %q to disappear", name) -} - -// WaitToDisappear waits the given timeout duration for the specified OVN VIP to disappear. 
-func (c *VipClient) WaitToDisappear(name string, _, timeout time.Duration) error {
-	err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.Vip, error) {
-		vip, err := c.VipInterface.Get(ctx, name, metav1.GetOptions{})
-		if apierrors.IsNotFound(err) {
-			return nil, nil
-		}
-		return vip, err
-	})).WithTimeout(timeout).Should(gomega.BeNil())
-	if err != nil {
-		return fmt.Errorf("expected vip %s to not be found: %w", name, err)
-	}
-	return nil
-}
-
-func MakeVip(namespaceName, name, subnet, v4ip, v6ip, vipType string) *apiv1.Vip {
-	vip := &apiv1.Vip{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: apiv1.VipSpec{
-			Namespace: namespaceName,
-			Subnet:    subnet,
-			V4ip:      v4ip,
-			V6ip:      v6ip,
-			Type:      vipType,
-		},
-	}
-	return vip
-}
diff --git a/test/e2e/framework/virtual-machine.go b/test/e2e/framework/virtual-machine.go
deleted file mode 100644
index fa302f3b199..00000000000
--- a/test/e2e/framework/virtual-machine.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package framework
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	k8sframework "k8s.io/kubernetes/test/e2e/framework"
-	v1 "kubevirt.io/api/core/v1"
-	"kubevirt.io/client-go/kubecli"
-
-	"github.com/onsi/ginkgo/v2"
-	"github.com/onsi/gomega"
-	"github.com/onsi/gomega/format"
-)
-
-// VMClient represents a KubeVirt VM client
-type VMClient struct {
-	f *Framework
-	kubecli.VirtualMachineInterface
-}
-
-func (f *Framework) VMClient() *VMClient {
-	return f.VMClientNS(f.Namespace.Name)
-}
-
-func (f *Framework) VMClientNS(namespace string) *VMClient {
-	return &VMClient{
-		f:                       f,
-		VirtualMachineInterface: f.KubeVirtClientSet.VirtualMachine(namespace),
-	}
-}
-
-func (c *VMClient) Get(name string) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-	vm, err := c.VirtualMachineInterface.Get(context.TODO(), name, metav1.GetOptions{})
-	ExpectNoError(err)
-	return vm
-}
-
-// Create creates a new vm according to the framework specifications
-func (c *VMClient) Create(vm *v1.VirtualMachine) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-	v, err := c.VirtualMachineInterface.Create(context.TODO(), vm, metav1.CreateOptions{})
-	ExpectNoError(err, "failed to create vm %s", vm.Name)
-	return c.Get(v.Name)
-}
-
-// CreateSync creates a new vm according to the framework specifications, and waits for it to be ready.
-func (c *VMClient) CreateSync(vm *v1.VirtualMachine) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-
-	v := c.Create(vm)
-	ExpectNoError(c.WaitToBeReady(v.Name, timeout))
-	// Get the newest vm after it becomes ready
-	return c.Get(v.Name).DeepCopy()
-}
-
-// Start starts the vm.
-func (c *VMClient) Start(name string) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-
-	vm := c.Get(name)
-	if vm.Spec.Running != nil && *vm.Spec.Running {
-		Logf("vm %s has already been started", name)
-		return vm
-	}
-
-	running := true
-	vm.Spec.Running = &running
-	_, err := c.VirtualMachineInterface.Update(context.TODO(), vm, metav1.UpdateOptions{})
-	ExpectNoError(err, "failed to update vm %s", name)
-	return c.Get(name)
-}
-
-// StartSync starts the vm and waits for it to be ready.
-func (c *VMClient) StartSync(name string) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-	_ = c.Start(name)
-	ExpectNoError(c.WaitToBeReady(name, 2*time.Minute))
-	return c.Get(name)
-}
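A hypothetical start/stop lifecycle built from the helpers in this file (the container-disk image and size label are invented for illustration):

    vmClient := f.VMClient()
    vm := MakeVM("vm-"+RandomSuffix(), "quay.io/kubevirt/cirros-container-disk-demo", "small", true)
    vm = vmClient.CreateSync(vm)    // waits for status.Ready
    _ = vmClient.StopSync(vm.Name)  // flips spec.Running to false, waits for status.Created to clear
    _ = vmClient.StartSync(vm.Name) // flips spec.Running back to true, waits for readiness
    vmClient.DeleteSync(vm.Name)

-// Stop stops the vm.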
-func (c *VMClient) Stop(name string) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-
-	vm := c.Get(name)
-	if vm.Spec.Running != nil && !*vm.Spec.Running {
-		Logf("vm %s has already been stopped", name)
-		return vm
-	}
-
-	running := false
-	vm.Spec.Running = &running
-	_, err := c.VirtualMachineInterface.Update(context.TODO(), vm, metav1.UpdateOptions{})
-	ExpectNoError(err, "failed to update vm %s", name)
-	return c.Get(name)
-}
-
-// StopSync stops the vm and waits for it to be stopped.
-func (c *VMClient) StopSync(name string) *v1.VirtualMachine {
-	ginkgo.GinkgoHelper()
-	_ = c.Stop(name)
-	ExpectNoError(c.WaitToBeStopped(name, 2*time.Minute))
-	return c.Get(name)
-}
-
-// Delete deletes a vm if the vm exists
-func (c *VMClient) Delete(name string) {
-	ginkgo.GinkgoHelper()
-	err := c.VirtualMachineInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
-	ExpectNoError(err, "failed to delete vm %s", name)
-}
-
-// DeleteSync deletes the vm and waits for the vm to disappear for `timeout`.
-// If the vm doesn't disappear before the timeout, it will fail the test.
-func (c *VMClient) DeleteSync(name string) {
-	ginkgo.GinkgoHelper()
-	c.Delete(name)
-	gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for vm %q to disappear", name)
-}
-
-// WaitToBeReady waits the given timeout duration for the specified vm to be ready.
-func (c *VMClient) WaitToBeReady(name string, timeout time.Duration) error {
-	err := k8sframework.Gomega().Eventually(context.TODO(), k8sframework.RetryNotFound(func(ctx context.Context) (*v1.VirtualMachine, error) {
-		return c.VirtualMachineInterface.Get(ctx, name, metav1.GetOptions{})
-	})).WithTimeout(timeout).Should(
-		k8sframework.MakeMatcher(func(vm *v1.VirtualMachine) (func() string, error) {
-			if vm.Status.Ready {
-				return nil, nil
-			}
-			return func() string {
-				return fmt.Sprintf("expected vm status to be ready, got status instead:\n%s", format.Object(vm.Status, 1))
-			}, nil
-		}))
-	if err != nil {
-		return fmt.Errorf("expected vm %s to be ready: %w", name, err)
-	}
-	return nil
-}
-
-// WaitToBeStopped waits the given timeout duration for the specified vm to be stopped.
-func (c *VMClient) WaitToBeStopped(name string, timeout time.Duration) error {
-	err := k8sframework.Gomega().Eventually(context.TODO(), k8sframework.RetryNotFound(func(ctx context.Context) (*v1.VirtualMachine, error) {
-		return c.VirtualMachineInterface.Get(ctx, name, metav1.GetOptions{})
-	})).WithTimeout(timeout).Should(
-		k8sframework.MakeMatcher(func(vm *v1.VirtualMachine) (func() string, error) {
-			if !vm.Status.Created {
-				return nil, nil
-			}
-			return func() string {
-				return fmt.Sprintf("expected vm status to be stopped, got status instead:\n%s", format.Object(vm.Status, 1))
-			}, nil
-		}))
-	if err != nil {
-		return fmt.Errorf("expected vm %s to be stopped: %w", name, err)
-	}
-	return nil
-}
-
-// WaitToDisappear waits the given timeout duration for the specified vm to disappear.
-func (c *VMClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := k8sframework.Gomega().Eventually(context.Background(), k8sframework.HandleRetry(func(ctx context.Context) (*v1.VirtualMachine, error) { - vm, err := c.VirtualMachineInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return vm, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected vm %s to not be found: %w", name, err) - } - return nil -} - -func MakeVM(name, image, size string, running bool) *v1.VirtualMachine { - vm := &v1.VirtualMachine{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: v1.VirtualMachineSpec{ - Running: &running, - Template: &v1.VirtualMachineInstanceTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "kubevirt.io/size": size, - "kubevirt.io/domain": name, - }, - }, - Spec: v1.VirtualMachineInstanceSpec{ - Domain: v1.DomainSpec{ - Devices: v1.Devices{ - Disks: []v1.Disk{ - { - Name: "containerdisk", - DiskDevice: v1.DiskDevice{ - Disk: &v1.DiskTarget{ - Bus: v1.DiskBusVirtio, - }, - }, - }, - { - Name: "cloudinitdisk", - DiskDevice: v1.DiskDevice{ - Disk: &v1.DiskTarget{ - Bus: v1.DiskBusVirtio, - }, - }, - }, - }, - Interfaces: []v1.Interface{ - { - Name: "default", - InterfaceBindingMethod: v1.DefaultMasqueradeNetworkInterface().InterfaceBindingMethod, - }, - }, - }, - Resources: v1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceMemory: resource.MustParse("64M"), - }, - }, - }, - Networks: []v1.Network{ - { - Name: "default", - NetworkSource: v1.DefaultPodNetwork().NetworkSource, - }, - }, - Volumes: []v1.Volume{ - { - Name: "containerdisk", - VolumeSource: v1.VolumeSource{ - ContainerDisk: &v1.ContainerDiskSource{ - Image: image, - ImagePullPolicy: corev1.PullIfNotPresent, - }, - }, - }, - { - Name: "cloudinitdisk", - VolumeSource: v1.VolumeSource{ - CloudInitNoCloud: &v1.CloudInitNoCloudSource{ - UserDataBase64: "SGkuXG4=", - }, - }, - }, - }, - }, - }, - }, - } - return vm -} diff --git a/test/e2e/framework/vlan.go b/test/e2e/framework/vlan.go deleted file mode 100644 index 0da08795669..00000000000 --- a/test/e2e/framework/vlan.go +++ /dev/null @@ -1,96 +0,0 @@ -package framework - -import ( - "context" - "errors" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// VlanClient is a struct for vlan client. 
-type VlanClient struct { - f *Framework - v1.VlanInterface -} - -func (f *Framework) VlanClient() *VlanClient { - return &VlanClient{ - f: f, - VlanInterface: f.KubeOVNClientSet.KubeovnV1().Vlans(), - } -} - -func (c *VlanClient) Get(name string) *apiv1.Vlan { - ginkgo.GinkgoHelper() - vlan, err := c.VlanInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return vlan -} - -// Create creates a new vlan according to the framework specifications -func (c *VlanClient) Create(pn *apiv1.Vlan) *apiv1.Vlan { - ginkgo.GinkgoHelper() - vlan, err := c.VlanInterface.Create(context.TODO(), pn, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating vlan") - return vlan.DeepCopy() -} - -// Patch patches the vlan -func (c *VlanClient) Patch(original, modified *apiv1.Vlan, timeout time.Duration) *apiv1.Vlan { - ginkgo.GinkgoHelper() - - patch, err := util.GenerateMergePatchPayload(original, modified) - ExpectNoError(err) - - var patchedVlan *apiv1.Vlan - err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) { - pn, err := c.VlanInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "") - if err != nil { - return handleWaitingAPIError(err, false, "patch vlan %q", original.Name) - } - patchedVlan = pn - return true, nil - }) - if err == nil { - return patchedVlan.DeepCopy() - } - - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while retrying to patch VLAN %s", original.Name) - } - Failf("error occurred while retrying to patch VLAN %s: %v", original.Name, err) - - return nil -} - -// Delete deletes a vlan if the vlan exists -func (c *VlanClient) Delete(name string, options metav1.DeleteOptions) { - ginkgo.GinkgoHelper() - err := c.VlanInterface.Delete(context.TODO(), name, options) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete vlan %q: %v", name, err) - } -} - -func MakeVlan(name, provider string, id int) *apiv1.Vlan { - vlan := &apiv1.Vlan{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.VlanSpec{ - Provider: provider, - ID: id, - }, - } - return vlan -} diff --git a/test/e2e/framework/vpc-nat-gw.go b/test/e2e/framework/vpc-nat-gw.go deleted file mode 100644 index 8b047cb7ad5..00000000000 --- a/test/e2e/framework/vpc-nat-gw.go +++ /dev/null @@ -1,224 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// VpcNatGatewayClient is a struct for vpc nat gw client. 
-type VpcNatGatewayClient struct {
-	f *Framework
-	v1.VpcNatGatewayInterface
-}
-
-func (f *Framework) VpcNatGatewayClient() *VpcNatGatewayClient {
-	return &VpcNatGatewayClient{
-		f:                      f,
-		VpcNatGatewayInterface: f.KubeOVNClientSet.KubeovnV1().VpcNatGateways(),
-	}
-}
-
-func (c *VpcNatGatewayClient) Get(name string) *apiv1.VpcNatGateway {
-	ginkgo.GinkgoHelper()
-	vpcNatGw, err := c.VpcNatGatewayInterface.Get(context.TODO(), name, metav1.GetOptions{})
-	ExpectNoError(err)
-	return vpcNatGw
-}
-
-// Create creates a new vpc nat gw according to the framework specifications
-func (c *VpcNatGatewayClient) Create(vpcNatGw *apiv1.VpcNatGateway) *apiv1.VpcNatGateway {
-	ginkgo.GinkgoHelper()
-	vpcNatGw, err := c.VpcNatGatewayInterface.Create(context.TODO(), vpcNatGw, metav1.CreateOptions{})
-	ExpectNoError(err, "Error creating vpc nat gw")
-	return vpcNatGw.DeepCopy()
-}
-
-// CreateSync creates a new vpc nat gw according to the framework specifications, and waits for it to be ready.
-func (c *VpcNatGatewayClient) CreateSync(vpcNatGw *apiv1.VpcNatGateway, clientSet clientset.Interface) *apiv1.VpcNatGateway {
-	ginkgo.GinkgoHelper()
-
-	vpcNatGw = c.Create(vpcNatGw)
-	// When multiple VPC NAT gateways are being created, it may require more time to wait.
-	timeout := 4 * time.Minute
-	ExpectTrue(c.WaitGwPodReady(vpcNatGw.Name, timeout, clientSet))
-	// Get the newest vpc nat gw after it becomes ready
-	return c.Get(vpcNatGw.Name).DeepCopy()
-}
-
-// Patch patches the vpc nat gw
-func (c *VpcNatGatewayClient) Patch(original, modified *apiv1.VpcNatGateway) *apiv1.VpcNatGateway {
-	ginkgo.GinkgoHelper()
-
-	patch, err := util.GenerateMergePatchPayload(original, modified)
-	ExpectNoError(err)
-
-	var patchedVpcNatGateway *apiv1.VpcNatGateway
-	err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
-		vpcNatGw, err := c.VpcNatGatewayInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
-		if err != nil {
-			return handleWaitingAPIError(err, false, "patch vpc nat gw %q", original.Name)
-		}
-		patchedVpcNatGateway = vpcNatGw
-		return true, nil
-	})
-	if err == nil {
-		return patchedVpcNatGateway.DeepCopy()
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		Failf("timed out while retrying to patch VPC NAT gateway %s", original.Name)
-	}
-	Failf("error occurred while retrying to patch VPC NAT gateway %s: %v", original.Name, err)
-
-	return nil
-}
-
-// PatchSync patches the vpc nat gw and waits for the vpc nat gw to be ready for `timeout`.
-// If the vpc nat gw doesn't become ready before the timeout, it will fail the test.
-func (c *VpcNatGatewayClient) PatchSync(original, modified *apiv1.VpcNatGateway, timeout time.Duration) *apiv1.VpcNatGateway {
-	ginkgo.GinkgoHelper()
-
-	vpcNatGw := c.Patch(original, modified)
-	ExpectTrue(c.WaitToBeUpdated(vpcNatGw, timeout))
-	ExpectTrue(c.WaitToBeReady(vpcNatGw.Name, timeout))
-	// Get the newest vpc nat gw after it becomes ready
-	return c.Get(vpcNatGw.Name).DeepCopy()
-}
-
-// PatchQoSPolicySync patches the vpc nat gw and waits for the qos to be ready for `timeout`.
-// If the qos doesn't become ready before the timeout, it will fail the test.
-func (c *VpcNatGatewayClient) PatchQoSPolicySync(natgwName, qosPolicyName string) *apiv1.VpcNatGateway { - ginkgo.GinkgoHelper() - - natgw := c.Get(natgwName) - modifiedNATGW := natgw.DeepCopy() - modifiedNATGW.Spec.QoSPolicy = qosPolicyName - _ = c.Patch(natgw, modifiedNATGW) - ExpectTrue(c.WaitToQoSReady(natgwName)) - return c.Get(natgwName).DeepCopy() -} - -// Delete deletes a vpc nat gw if the vpc nat gw exists -func (c *VpcNatGatewayClient) Delete(name string) { - ginkgo.GinkgoHelper() - err := c.VpcNatGatewayInterface.Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - Failf("Failed to delete vpc nat gw %q: %v", name, err) - } -} - -// DeleteSync deletes the vpc nat gw and waits for the vpc nat gw to disappear for `timeout`. -// If the vpc nat gw doesn't disappear before the timeout, it will fail the test. -func (c *VpcNatGatewayClient) DeleteSync(name string) { - ginkgo.GinkgoHelper() - c.Delete(name) - gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for vpc nat gw %q to disappear", name) -} - -// WaitToBeReady returns whether the vpc nat gw is ready within timeout. -func (c *VpcNatGatewayClient) WaitToBeReady(name string, timeout time.Duration) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - if c.Get(name).Spec.LanIP != "" { - return true - } - } - return false -} - -// WaitGwPodReady returns whether the vpc nat gw pod is ready within timeout. -func (c *VpcNatGatewayClient) WaitGwPodReady(name string, timeout time.Duration, clientSet clientset.Interface) bool { - podName := util.GenNatGwPodName(name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - pod, err := clientSet.CoreV1().Pods("kube-system").Get(context.Background(), podName, metav1.GetOptions{}) - if err != nil { - if apierrors.IsNotFound(err) { - Logf("natgw %s is not ready err: %s", name, err) - continue - } - framework.ExpectNoError(err, "failed to get pod %v", podName) - } - if len(pod.Annotations) != 0 && pod.Annotations[util.VpcNatGatewayInitAnnotation] == "true" { - Logf("natgw %s is ready", name) - return true - } - Logf("natgw %s is not ready", name) - } - return false -} - -// WaitToBeUpdated returns whether the vpc nat gw is updated within timeout. -func (c *VpcNatGatewayClient) WaitToBeUpdated(vpcNatGw *apiv1.VpcNatGateway, timeout time.Duration) bool { - Logf("Waiting up to %v for vpc nat gw %s to be updated", timeout, vpcNatGw.Name) - rv, _ := big.NewInt(0).SetString(vpcNatGw.ResourceVersion, 10) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - s := c.Get(vpcNatGw.Name) - if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 { - return true - } - } - Logf("vpc nat gw %s was not updated within %v", vpcNatGw.Name, timeout) - return false -} - -// WaitToDisappear waits the given timeout duration for the specified VPC NAT gateway to disappear. 
-func (c *VpcNatGatewayClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*apiv1.VpcNatGateway, error) { - gw, err := c.VpcNatGatewayInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return gw, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected VPC NAT gateway %s to not be found: %w", name, err) - } - return nil -} - -// WaitToQoSReady returns whether the qos is ready within timeout. -func (c *VpcNatGatewayClient) WaitToQoSReady(name string) bool { - for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) { - natgw := c.Get(name) - if natgw.Status.QoSPolicy == natgw.Spec.QoSPolicy { - Logf("qos %s of vpc nat gateway %s is ready", natgw.Spec.QoSPolicy, name) - return true - } - Logf("qos %s of vpc nat gateway %s is not ready", natgw.Spec.QoSPolicy, name) - } - return false -} - -func MakeVpcNatGateway(name, vpc, subnet, lanIP, externalSubnet, qosPolicyName string) *apiv1.VpcNatGateway { - vpcNatGw := &apiv1.VpcNatGateway{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: apiv1.VpcNatSpec{ - Vpc: vpc, - Subnet: subnet, - LanIP: lanIP, - }, - } - if externalSubnet != "" { - vpcNatGw.Spec.ExternalSubnets = []string{externalSubnet} - } - vpcNatGw.Spec.QoSPolicy = qosPolicyName - return vpcNatGw -} diff --git a/test/e2e/framework/vpc.go b/test/e2e/framework/vpc.go deleted file mode 100644 index f6c897a2438..00000000000 --- a/test/e2e/framework/vpc.go +++ /dev/null @@ -1,180 +0,0 @@ -package framework - -import ( - "context" - "errors" - "fmt" - "math/big" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/test/e2e/framework" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - kubeovnv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - v1 "github.com/kubeovn/kube-ovn/pkg/client/clientset/versioned/typed/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" -) - -// VpcClient is a struct for vpc client. -type VpcClient struct { - f *Framework - v1.VpcInterface -} - -func (f *Framework) VpcClient() *VpcClient { - return &VpcClient{ - f: f, - VpcInterface: f.KubeOVNClientSet.KubeovnV1().Vpcs(), - } -} - -func (c *VpcClient) Get(name string) *kubeovnv1.Vpc { - ginkgo.GinkgoHelper() - vpc, err := c.VpcInterface.Get(context.TODO(), name, metav1.GetOptions{}) - ExpectNoError(err) - return vpc -} - -// Create creates a new vpc according to the framework specifications -func (c *VpcClient) Create(vpc *kubeovnv1.Vpc) *kubeovnv1.Vpc { - ginkgo.GinkgoHelper() - vpc, err := c.VpcInterface.Create(context.TODO(), vpc, metav1.CreateOptions{}) - ExpectNoError(err, "Error creating vpc") - return vpc.DeepCopy() -} - -// CreateSync creates a new vpc according to the framework specifications, and waits for it to be ready. 
-func (c *VpcClient) CreateSync(vpc *kubeovnv1.Vpc) *kubeovnv1.Vpc {
-	ginkgo.GinkgoHelper()
-
-	vpc = c.Create(vpc)
-	ExpectTrue(c.WaitToBeReady(vpc.Name, timeout))
-	// Get the newest vpc after it becomes ready
-	return c.Get(vpc.Name).DeepCopy()
-}
-
-// Patch patches the vpc
-func (c *VpcClient) Patch(original, modified *kubeovnv1.Vpc) *kubeovnv1.Vpc {
-	ginkgo.GinkgoHelper()
-
-	patch, err := util.GenerateMergePatchPayload(original, modified)
-	ExpectNoError(err)
-
-	var patchedVpc *kubeovnv1.Vpc
-	err = wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
-		vpc, err := c.VpcInterface.Patch(ctx, original.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "")
-		if err != nil {
-			return handleWaitingAPIError(err, false, "patch vpc %q", original.Name)
-		}
-		patchedVpc = vpc
-		return true, nil
-	})
-	if err == nil {
-		return patchedVpc.DeepCopy()
-	}
-
-	if errors.Is(err, context.DeadlineExceeded) {
-		Failf("timed out while retrying to patch VPC %s", original.Name)
-	}
-	Failf("error occurred while retrying to patch VPC %s: %v", original.Name, err)
-
-	return nil
-}
-
-// PatchSync patches the vpc and waits for the vpc to be ready for `timeout`.
-// If the vpc doesn't become ready before the timeout, it will fail the test.
-func (c *VpcClient) PatchSync(original, modified *kubeovnv1.Vpc, _ []string, timeout time.Duration) *kubeovnv1.Vpc {
-	ginkgo.GinkgoHelper()
-
-	vpc := c.Patch(original, modified)
-	ExpectTrue(c.WaitToBeUpdated(vpc, timeout))
-	ExpectTrue(c.WaitToBeReady(vpc.Name, timeout))
-	// Get the newest vpc after it becomes ready
-	return c.Get(vpc.Name).DeepCopy()
-}
-
-// Delete deletes a vpc if the vpc exists
-func (c *VpcClient) Delete(name string) {
-	ginkgo.GinkgoHelper()
-	err := c.VpcInterface.Delete(context.TODO(), name, metav1.DeleteOptions{})
-	if err != nil && !apierrors.IsNotFound(err) {
-		Failf("Failed to delete vpc %q: %v", name, err)
-	}
-}
-
-// DeleteSync deletes the vpc and waits for the vpc to disappear for `timeout`.
-// If the vpc doesn't disappear before the timeout, it will fail the test.
-func (c *VpcClient) DeleteSync(name string) {
-	ginkgo.GinkgoHelper()
-	c.Delete(name)
-	gomega.Expect(c.WaitToDisappear(name, 2*time.Second, timeout)).To(gomega.Succeed(), "wait for vpc %q to disappear", name)
-}
-
-// WaitToBeReady returns whether the vpc is ready within timeout.
-func (c *VpcClient) WaitToBeReady(name string, timeout time.Duration) bool {
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		if c.Get(name).Status.Standby {
-			// standby means the vpc is ready
-			return true
-		}
-	}
-	return false
-}
-
-// WaitToBeUpdated returns whether the vpc is updated within timeout.
-func (c *VpcClient) WaitToBeUpdated(vpc *kubeovnv1.Vpc, timeout time.Duration) bool {
-	Logf("Waiting up to %v for vpc %s to be updated", timeout, vpc.Name)
-	rv, _ := big.NewInt(0).SetString(vpc.ResourceVersion, 10)
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
-		s := c.Get(vpc.Name)
-		if current, _ := big.NewInt(0).SetString(s.ResourceVersion, 10); current.Cmp(rv) > 0 {
-			return true
-		}
-	}
-	Logf("Vpc %s was not updated within %v", vpc.Name, timeout)
-	return false
-}
-
-// WaitToDisappear waits the given timeout duration for the specified VPC to disappear.
-func (c *VpcClient) WaitToDisappear(name string, _, timeout time.Duration) error { - err := framework.Gomega().Eventually(context.Background(), framework.HandleRetry(func(ctx context.Context) (*kubeovnv1.Vpc, error) { - vpc, err := c.VpcInterface.Get(ctx, name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return nil, nil - } - return vpc, err - })).WithTimeout(timeout).Should(gomega.BeNil()) - if err != nil { - return fmt.Errorf("expected VPC %s to not be found: %w", name, err) - } - return nil -} - -func MakeVpc(name, gatewayV4 string, enableExternal, enableBfd bool, namespaces []string) *kubeovnv1.Vpc { - routes := make([]*kubeovnv1.StaticRoute, 0, 1) - if gatewayV4 != "" { - routes = append(routes, &kubeovnv1.StaticRoute{ - Policy: kubeovnv1.PolicyDst, - CIDR: "0.0.0.0/0", - NextHopIP: gatewayV4, - }) - } - vpc := &kubeovnv1.Vpc{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: kubeovnv1.VpcSpec{ - StaticRoutes: routes, - EnableExternal: enableExternal, - EnableBfd: enableBfd, - Namespaces: namespaces, - }, - } - return vpc -} diff --git a/test/e2e/framework/wait.go b/test/e2e/framework/wait.go deleted file mode 100644 index cf445c7564d..00000000000 --- a/test/e2e/framework/wait.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package framework - -import ( - "context" - "errors" - "fmt" - "time" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/onsi/ginkgo/v2" -) - -// handleWaitingAPIError handles an error from an API request in the context of a Wait function. -// If the error is retryable, sleep the recommended delay and ignore the error. -// If the error is terminal, return it. -func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) { - taskDescription := fmt.Sprintf(taskFormat, taskArgs...) - if retryNotFound && apierrors.IsNotFound(err) { - Logf("Ignoring NotFound error while " + taskDescription) - return false, nil - } - if retry, delay := shouldRetry(err); retry { - Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err) - if delay > 0 { - time.Sleep(delay) - } - return false, nil - } - Logf("Encountered non-retryable error while %s: %v", taskDescription, err) - return false, err -} - -// Decide whether to retry an API request. Optionally include a delay to retry after. -func shouldRetry(err error) (retry bool, retryAfter time.Duration) { - // if the error sends the Retry-After header, we respect it as an explicit confirmation we should retry. - if delay, shouldRetry := apierrors.SuggestsClientDelay(err); shouldRetry { - return shouldRetry, time.Duration(delay) * time.Second - } - - // these errors indicate a transient error that should be retried. 
- if apierrors.IsTimeout(err) || apierrors.IsTooManyRequests(err) { - return true, 0 - } - - return false, 0 -} - -// WaitUntil waits the condition to be met -func WaitUntil(_, timeout time.Duration, cond func(context.Context) (bool, error), condDesc string) { - ginkgo.GinkgoHelper() - - if err := wait.PollUntilContextTimeout(context.Background(), 2*time.Second, timeout, false, cond); err != nil { - if errors.Is(err, context.DeadlineExceeded) { - Failf("timed out while waiting for the condition to be met: %s", condDesc) - } - Failf("error occurred while waiting for the condition %q to be met", condDesc) - } -} diff --git a/test/e2e/ha/ha_test.go b/test/e2e/ha/ha_test.go deleted file mode 100644 index 8edf62deda9..00000000000 --- a/test/e2e/ha/ha_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package ha - -import ( - "context" - "flag" - "fmt" - "strconv" - "testing" - "time" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} - -var _ = framework.Describe("[group:ha]", func() { - f := framework.NewDefaultFramework("ha") - f.SkipNamespaceCreation = true - - framework.DisruptiveIt("ovn db should recover automatically from db file corruption", func() { - f.SkipVersionPriorTo(1, 11, "This feature was introduced in v1.11") - - ginkgo.By("Getting daemonset ovs-ovn") - dsClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - ds := dsClient.Get("ovs-ovn") - - ginkgo.By("Getting deployment ovn-central") - deployClient := f.DeploymentClientNS(framework.KubeOvnNamespace) - deploy := deployClient.Get("ovn-central") - replicas := *deploy.Spec.Replicas - framework.ExpectNotZero(replicas) - - ginkgo.By("Ensuring deployment ovn-central is ready") - deployClient.RolloutStatus(deploy.Name) - - ginkgo.By("Getting nodes running deployment ovn-central") - deployClient.RolloutStatus(deploy.Name) - pods, err := deployClient.GetPods(deploy) - framework.ExpectNoError(err) - framework.ExpectHaveLen(pods.Items, int(replicas)) - nodes := make([]string, 0, int(replicas)) - for _, pod := range pods.Items { - nodes = append(nodes, pod.Spec.NodeName) - } - - ginkgo.By("Setting size of deployment ovn-central to 0") - deployClient.SetScale(deploy.Name, 0) - - ginkgo.By("Waiting for ovn-central pods to disappear") - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - pods, err := deployClient.GetAllPods(deploy) - if err != nil { - return false, err - } - return len(pods.Items) == 0, nil - }, "") - - db := "/etc/ovn/ovnnb_db.db" - checkCmd := fmt.Sprintf("ovsdb-tool check-cluster %s", db) - corruptCmd := fmt.Sprintf(`bash -c 'dd if=/dev/zero of="%s" bs=1 count=$((10+$RANDOM%%10)) seek=$(stat -c %%s "%s")'`, db, db) - for _, node := range nodes { - ginkgo.By("Getting ovs-ovn pod running on node " + node) - pod, err := dsClient.GetPodOnNode(ds, node) - framework.ExpectNoError(err) - - ginkgo.By("Ensuring db file " + db + " on node " + node + " is ok") - stdout, stderr, err := framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, 
checkCmd) - framework.ExpectNoError(err, fmt.Sprintf("failed to check db file %q: stdout = %q, stderr = %q", db, stdout, stderr)) - ginkgo.By("Corrupting db file " + db + " on node " + node) - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, corruptCmd) - framework.ExpectNoError(err, fmt.Sprintf("failed to corrupt db file %q: stdout = %q, stderr = %q", db, stdout, stderr)) - ginkgo.By("Ensuring db file " + db + " on node " + node + " is corrupted") - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, checkCmd) - framework.ExpectError(err) - framework.Logf("command output: stdout = %q, stderr = %q", stdout, stderr) - } - - ginkgo.By("Setting size of deployment ovn-central to " + strconv.Itoa(int(replicas))) - deployClient.SetScale(deploy.Name, replicas) - - ginkgo.By("Waiting for deployment ovn-central to be ready") - deployClient.RolloutStatus(deploy.Name) - }) -}) diff --git a/test/e2e/iptables-vpc-nat-gw/e2e_test.go b/test/e2e/iptables-vpc-nat-gw/e2e_test.go deleted file mode 100644 index 3bb50c6406b..00000000000 --- a/test/e2e/iptables-vpc-nat-gw/e2e_test.go +++ /dev/null @@ -1,1406 +0,0 @@ -package ovn_eip - -import ( - "context" - "errors" - "flag" - "fmt" - "strconv" - "strings" - "testing" - "time" - - dockernetwork "github.com/docker/docker/api/types/network" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/ovs" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -const ( - dockerExtNet1Name = "kube-ovn-ext-net1" - dockerExtNet2Name = "kube-ovn-ext-net2" - vpcNatGWConfigMapName = "ovn-vpc-nat-gw-config" - vpcNatConfigName = "ovn-vpc-nat-config" - networkAttachDefName = "ovn-vpc-external-network" - externalSubnetProvider = "ovn-vpc-external-network.kube-system" -) - -const ( - iperf2Port = "20288" - skipIperf = false -) - -const ( - eipLimit = iota*5 + 10 - updatedEIPLimit - newEIPLimit - specificIPLimit - defaultNicLimit -) - -type qosParams struct { - vpc1Name string - vpc2Name string - vpc1SubnetName string - vpc2SubnetName string - vpcNat1GwName string - vpcNat2GwName string - vpc1EIPName string - vpc2EIPName string - vpc1FIPName string - vpc2FIPName string - vpc1PodName string - vpc2PodName string - attachDefName string - subnetProvider string -} - -func setupNetworkAttachmentDefinition( - f *framework.Framework, - dockerExtNetNetwork *dockernetwork.Inspect, - attachNetClient *framework.NetworkAttachmentDefinitionClient, - subnetClient *framework.SubnetClient, - externalNetworkName string, - nicName string, - provider string, - dockerExtNetName string, -) { - ginkgo.GinkgoHelper() - - ginkgo.By("Getting docker network " + dockerExtNetName) - network, err := docker.NetworkInspect(dockerExtNetName) - framework.ExpectNoError(err, "getting docker network "+dockerExtNetName) - ginkgo.By("Getting network attachment definition " + externalNetworkName) - attachConf := fmt.Sprintf(`{ - "cniVersion": "0.3.0", - "type": "macvlan", - "master": 
"%s", - "mode": "bridge", - "ipam": { - "type": "kube-ovn", - "server_socket": "/run/openvswitch/kube-ovn-daemon.sock", - "provider": "%s" - } - }`, nicName, provider) - attachNet := framework.MakeNetworkAttachmentDefinition(externalNetworkName, framework.KubeOvnNamespace, attachConf) - attachNetClient.Create(attachNet) - nad := attachNetClient.Get(externalNetworkName) - - ginkgo.By("Got network attachment definition " + nad.Name) - - ginkgo.By("Creating underlay macvlan subnet " + externalNetworkName) - var cidrV4, cidrV6, gatewayV4, gatewayV6 string - for _, config := range dockerExtNetNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if f.HasIPv4() { - cidrV4 = config.Subnet - gatewayV4 = config.Gateway - } - case apiv1.ProtocolIPv6: - if f.HasIPv6() { - cidrV6 = config.Subnet - gatewayV6 = config.Gateway - } - } - } - cidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - if f.HasIPv4() { - cidr = append(cidr, cidrV4) - gateway = append(gateway, gatewayV4) - } - if f.HasIPv6() { - cidr = append(cidr, cidrV6) - gateway = append(gateway, gatewayV6) - } - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if container.IPv4Address != "" && f.HasIPv4() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.HasIPv6() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) - } - } - macvlanSubnet := framework.MakeSubnet(externalNetworkName, "", strings.Join(cidr, ","), strings.Join(gateway, ","), "", provider, excludeIPs, nil, nil) - _ = subnetClient.CreateSync(macvlanSubnet) -} - -func setupVpcNatGwTestEnvironment( - f *framework.Framework, - dockerExtNetNetwork *dockernetwork.Inspect, - attachNetClient *framework.NetworkAttachmentDefinitionClient, - subnetClient *framework.SubnetClient, - vpcClient *framework.VpcClient, - vpcNatGwClient *framework.VpcNatGatewayClient, - vpcName string, - overlaySubnetName string, - vpcNatGwName string, - natGwQosPolicy string, - overlaySubnetV4Cidr string, - overlaySubnetV4Gw string, - lanIP string, - dockerExtNetName string, - externalNetworkName string, - nicName string, - provider string, - skipNADSetup bool, -) { - ginkgo.GinkgoHelper() - - if !skipNADSetup { - setupNetworkAttachmentDefinition( - f, dockerExtNetNetwork, attachNetClient, - subnetClient, externalNetworkName, nicName, provider, dockerExtNetName) - } - - ginkgo.By("Getting config map " + vpcNatGWConfigMapName) - _, err := f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.Background(), vpcNatGWConfigMapName, metav1.GetOptions{}) - framework.ExpectNoError(err, "failed to get ConfigMap") - - ginkgo.By("Creating custom vpc " + vpcName) - vpc := framework.MakeVpc(vpcName, lanIP, false, false, nil) - _ = vpcClient.CreateSync(vpc) - - ginkgo.By("Creating custom overlay subnet " + overlaySubnetName) - overlaySubnet := framework.MakeSubnet(overlaySubnetName, "", overlaySubnetV4Cidr, overlaySubnetV4Gw, vpcName, "", nil, nil, nil) - _ = subnetClient.CreateSync(overlaySubnet) - - ginkgo.By("Creating custom vpc nat gw " + vpcNatGwName) - vpcNatGw := framework.MakeVpcNatGateway(vpcNatGwName, vpcName, overlaySubnetName, lanIP, externalNetworkName, natGwQosPolicy) - _ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet) -} - -func cleanVpcNatGwTestEnvironment( - subnetClient *framework.SubnetClient, - vpcClient *framework.VpcClient, - vpcNatGwClient *framework.VpcNatGatewayClient, 
- vpcName string, - overlaySubnetName string, - vpcNatGwName string, -) { - ginkgo.GinkgoHelper() - - ginkgo.By("start to clean custom vpc nat gw " + vpcNatGwName) - ginkgo.By("clean custom vpc nat gw " + vpcNatGwName) - vpcNatGwClient.DeleteSync(vpcNatGwName) - - ginkgo.By("clean custom overlay subnet " + overlaySubnetName) - subnetClient.DeleteSync(overlaySubnetName) - - ginkgo.By("clean custom vpc " + vpcName) - vpcClient.DeleteSync(vpcName) -} - -var _ = framework.SerialDescribe("[group:iptables-vpc-nat-gw]", func() { - f := framework.NewDefaultFramework("iptables-vpc-nat-gw") - - var skip bool - var cs clientset.Interface - var attachNetClient *framework.NetworkAttachmentDefinitionClient - var clusterName, vpcName, vpcNatGwName, overlaySubnetName string - var vpcClient *framework.VpcClient - var vpcNatGwClient *framework.VpcNatGatewayClient - var subnetClient *framework.SubnetClient - var fipVipName, fipEipName, fipName, dnatVipName, dnatEipName, dnatName, snatEipName, snatName string - // sharing case - var sharedVipName, sharedEipName, sharedEipDnatName, sharedEipSnatName, sharedEipFipShouldOkName, sharedEipFipShouldFailName string - var vipClient *framework.VipClient - var ipClient *framework.IPClient - var iptablesEIPClient *framework.IptablesEIPClient - var iptablesFIPClient *framework.IptablesFIPClient - var iptablesSnatRuleClient *framework.IptablesSnatClient - var iptablesDnatRuleClient *framework.IptablesDnatClient - - var dockerExtNet1Network *dockernetwork.Inspect - var net1NicName string - - // multiple external network case - var dockerExtNet2Network *dockernetwork.Inspect - var net2NicName string - var net2AttachDefName string - var net2SubnetProvider string - var net2OverlaySubnetName string - var net2VpcNatGwName string - var net2VpcName string - var net2EipName string - - vpcName = "vpc-" + framework.RandomSuffix() - vpcNatGwName = "gw-" + framework.RandomSuffix() - - fipVipName = "fip-vip-" + framework.RandomSuffix() - fipEipName = "fip-eip-" + framework.RandomSuffix() - fipName = "fip-" + framework.RandomSuffix() - - dnatVipName = "dnat-vip-" + framework.RandomSuffix() - dnatEipName = "dnat-eip-" + framework.RandomSuffix() - dnatName = "dnat-" + framework.RandomSuffix() - - // sharing case - sharedVipName = "shared-vip-" + framework.RandomSuffix() - sharedEipName = "shared-eip-" + framework.RandomSuffix() - sharedEipDnatName = "shared-eip-dnat-" + framework.RandomSuffix() - sharedEipSnatName = "shared-eip-snat-" + framework.RandomSuffix() - sharedEipFipShouldOkName = "shared-eip-fip-should-ok-" + framework.RandomSuffix() - sharedEipFipShouldFailName = "shared-eip-fip-should-fail-" + framework.RandomSuffix() - - snatEipName = "snat-eip-" + framework.RandomSuffix() - snatName = "snat-" + framework.RandomSuffix() - overlaySubnetName = "overlay-subnet-" + framework.RandomSuffix() - - net2AttachDefName = "net2-ovn-vpc-external-network-" + framework.RandomSuffix() - net2SubnetProvider = fmt.Sprintf("%s.%s", net2AttachDefName, framework.KubeOvnNamespace) - net2OverlaySubnetName = "net2-overlay-subnet-" + framework.RandomSuffix() - net2VpcNatGwName = "net2-gw-" + framework.RandomSuffix() - net2VpcName = "net2-vpc-" + framework.RandomSuffix() - net2EipName = "net2-eip-" + framework.RandomSuffix() - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - attachNetClient = f.NetworkAttachmentDefinitionClientNS(framework.KubeOvnNamespace) - subnetClient = f.SubnetClient() - vpcClient = f.VpcClient() - vpcNatGwClient = f.VpcNatGatewayClient() - iptablesEIPClient = 
f.IptablesEIPClient() - vipClient = f.VipClient() - ipClient = f.IPClient() - iptablesFIPClient = f.IptablesFIPClient() - iptablesSnatRuleClient = f.IptablesSnatClient() - iptablesDnatRuleClient = f.IptablesDnatClient() - - if skip { - ginkgo.Skip("underlay spec only runs on kind clusters") - } - - if clusterName == "" { - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) - if !ok { - skip = true - ginkgo.Skip("underlay spec only runs on kind clusters") - } - clusterName = cluster - } - - if dockerExtNet1Network == nil { - ginkgo.By("Ensuring docker network " + dockerExtNet1Name + " exists") - network1, err := docker.NetworkCreate(dockerExtNet1Name, true, true) - framework.ExpectNoError(err, "creating docker network "+dockerExtNet1Name) - - dockerExtNet1Network = network1 - } - - if dockerExtNet2Network == nil { - ginkgo.By("Ensuring docker network " + dockerExtNet2Name + " exists") - network2, err := docker.NetworkCreate(dockerExtNet2Name, true, true) - framework.ExpectNoError(err, "creating docker network "+dockerExtNet2Name) - dockerExtNet2Network = network2 - } - - ginkgo.By("Getting kind nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - framework.ExpectNotEmpty(nodes) - - ginkgo.By("Connecting nodes to the docker network") - err = kind.NetworkConnect(dockerExtNet1Network.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNet1Name) - - ginkgo.By("Connecting nodes to the docker network") - err = kind.NetworkConnect(dockerExtNet2Network.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNet2Name) - - ginkgo.By("Getting node links that belong to the docker network") - nodes, err = kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - - ginkgo.By("Validating node links") - network1, err := docker.NetworkInspect(dockerExtNet1Name) - framework.ExpectNoError(err) - network2, err := docker.NetworkInspect(dockerExtNet2Name) - framework.ExpectNoError(err) - var eth0Exist, net1Exist, net2Exist bool - for _, node := range nodes { - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - net1Mac := network1.Containers[node.ID].MacAddress - net2Mac := network2.Containers[node.ID].MacAddress - for _, link := range links { - ginkgo.By("exist node nic " + link.IfName) - if link.IfName == "eth0" { - eth0Exist = true - } - if link.Address == net1Mac { - net1NicName = link.IfName - net1Exist = true - } - if link.Address == net2Mac { - net2NicName = link.IfName - net2Exist = true - } - } - framework.ExpectTrue(eth0Exist) - framework.ExpectTrue(net1Exist) - framework.ExpectTrue(net2Exist) - } - }) - - ginkgo.AfterEach(func() { - cleanVpcNatGwTestEnvironment(subnetClient, vpcClient, vpcNatGwClient, vpcName, overlaySubnetName, vpcNatGwName) - ginkgo.By("Deleting macvlan underlay subnet " + networkAttachDefName) - subnetClient.DeleteSync(networkAttachDefName) - - // delete net1 attachment definition - ginkgo.By("Deleting nad " + networkAttachDefName) - attachNetClient.Delete(networkAttachDefName) - // delete net2 attachment definition - ginkgo.By("Deleting nad " + net2AttachDefName) - attachNetClient.Delete(net2AttachDefName) - - ginkgo.By("Getting nodes") - nodes, err := kind.ListNodes(clusterName, "") - 
framework.ExpectNoError(err, "getting nodes in cluster") - - if dockerExtNet1Network != nil { - ginkgo.By("Disconnecting nodes from the docker network") - err = kind.NetworkDisconnect(dockerExtNet1Network.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNet1Name) - } - if dockerExtNet2Network != nil { - ginkgo.By("Disconnecting nodes from the docker network") - err = kind.NetworkDisconnect(dockerExtNet2Network.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNet2Name) - } - }) - - framework.ConformanceIt("change gateway image", func() { - overlaySubnetV4Cidr := "10.0.2.0/24" - overlaySubnetV4Gw := "10.0.2.1" - lanIP := "10.0.2.254" - natgwQoS := "" - cm, err := f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.Background(), vpcNatConfigName, metav1.GetOptions{}) - framework.ExpectNoError(err) - oldImage := cm.Data["image"] - cm.Data["image"] = "docker.io/kubeovn/vpc-nat-gateway:v1.12.18" - cm, err = f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Update(context.Background(), cm, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - time.Sleep(3 * time.Second) - setupVpcNatGwTestEnvironment( - f, dockerExtNet1Network, attachNetClient, - subnetClient, vpcClient, vpcNatGwClient, - vpcName, overlaySubnetName+"image", vpcNatGwName, natgwQoS, - overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, - dockerExtNet1Name, networkAttachDefName, net1NicName, - externalSubnetProvider, - false, - ) - vpcNatGwPodName := util.GenNatGwPodName(vpcNatGwName) - pod := f.PodClientNS("kube-system").GetPod(vpcNatGwPodName) - framework.ExpectNotNil(pod) - framework.ExpectEqual(pod.Spec.Containers[0].Image, cm.Data["image"]) - - // recover the image - cm.Data["image"] = oldImage - _, err = f.ClientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Update(context.Background(), cm, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - }) - - framework.ConformanceIt("iptables eip fip snat dnat", func() { - overlaySubnetV4Cidr := "10.0.1.0/24" - overlaySubnetV4Gw := "10.0.1.1" - lanIP := "10.0.1.254" - natgwQoS := "" - setupVpcNatGwTestEnvironment( - f, dockerExtNet1Network, attachNetClient, - subnetClient, vpcClient, vpcNatGwClient, - vpcName, overlaySubnetName, vpcNatGwName, natgwQoS, - overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP, - dockerExtNet1Name, networkAttachDefName, net1NicName, - externalSubnetProvider, - false, - ) - - ginkgo.By("Creating iptables vip for fip") - fipVip := framework.MakeVip(f.Namespace.Name, fipVipName, overlaySubnetName, "", "", "") - _ = vipClient.CreateSync(fipVip) - fipVip = vipClient.Get(fipVipName) - ginkgo.By("Creating iptables eip for fip") - fipEip := framework.MakeIptablesEIP(fipEipName, "", "", "", vpcNatGwName, "", "") - _ = iptablesEIPClient.CreateSync(fipEip) - ginkgo.By("Creating iptables fip") - fip := framework.MakeIptablesFIPRule(fipName, fipEipName, fipVip.Status.V4ip) - _ = iptablesFIPClient.CreateSync(fip) - - ginkgo.By("Creating iptables eip for snat") - snatEip := framework.MakeIptablesEIP(snatEipName, "", "", "", vpcNatGwName, "", "") - _ = iptablesEIPClient.CreateSync(snatEip) - ginkgo.By("Creating iptables snat") - snat := framework.MakeIptablesSnatRule(snatName, snatEipName, overlaySubnetV4Cidr) - _ = iptablesSnatRuleClient.CreateSync(snat) - - ginkgo.By("Creating iptables vip for dnat") - dnatVip := framework.MakeVip(f.Namespace.Name, dnatVipName, overlaySubnetName, "", "", "") - _ = vipClient.CreateSync(dnatVip) - dnatVip = vipClient.Get(dnatVipName) - 
ginkgo.By("Creating iptables eip for dnat") - dnatEip := framework.MakeIptablesEIP(dnatEipName, "", "", "", vpcNatGwName, "", "") - _ = iptablesEIPClient.CreateSync(dnatEip) - ginkgo.By("Creating iptables dnat") - dnat := framework.MakeIptablesDnatRule(dnatName, dnatEipName, "80", "tcp", dnatVip.Status.V4ip, "8080") - _ = iptablesDnatRuleClient.CreateSync(dnat) - - // share eip case - ginkgo.By("Creating share vip") - shareVip := framework.MakeVip(f.Namespace.Name, sharedVipName, overlaySubnetName, "", "", "") - _ = vipClient.CreateSync(shareVip) - fipVip = vipClient.Get(fipVipName) - ginkgo.By("Creating share iptables eip") - shareEip := framework.MakeIptablesEIP(sharedEipName, "", "", "", vpcNatGwName, "", "") - _ = iptablesEIPClient.CreateSync(shareEip) - ginkgo.By("Creating the first iptables fip with share eip vip should be ok") - shareFipShouldOk := framework.MakeIptablesFIPRule(sharedEipFipShouldOkName, sharedEipName, fipVip.Status.V4ip) - _ = iptablesFIPClient.CreateSync(shareFipShouldOk) - ginkgo.By("Creating the second iptables fip with share eip vip should be failed") - shareFipShouldFail := framework.MakeIptablesFIPRule(sharedEipFipShouldFailName, sharedEipName, fipVip.Status.V4ip) - _ = iptablesFIPClient.Create(shareFipShouldFail) - ginkgo.By("Creating iptables dnat for dnat with share eip vip") - shareDnat := framework.MakeIptablesDnatRule(sharedEipDnatName, sharedEipName, "80", "tcp", fipVip.Status.V4ip, "8080") - _ = iptablesDnatRuleClient.CreateSync(shareDnat) - ginkgo.By("Creating iptables snat with share eip vip") - shareSnat := framework.MakeIptablesSnatRule(sharedEipSnatName, sharedEipName, overlaySubnetV4Cidr) - _ = iptablesSnatRuleClient.CreateSync(shareSnat) - - ginkgo.By("Get share eip") - shareEip = iptablesEIPClient.Get(sharedEipName) - ginkgo.By("Get share dnat") - shareDnat = iptablesDnatRuleClient.Get(sharedEipDnatName) - ginkgo.By("Get share snat") - shareSnat = iptablesSnatRuleClient.Get(sharedEipSnatName) - ginkgo.By("Get share fip should ok") - shareFipShouldOk = iptablesFIPClient.Get(sharedEipFipShouldOkName) - ginkgo.By("Get share fip should fail") - shareFipShouldFail = iptablesFIPClient.Get(sharedEipFipShouldFailName) - - ginkgo.By("Check share eip should has the external ip label") - framework.ExpectHaveKeyWithValue(shareEip.Labels, util.EipV4IpLabel, shareEip.Spec.V4ip) - ginkgo.By("Check share dnat should has the external ip label") - framework.ExpectHaveKeyWithValue(shareDnat.Labels, util.EipV4IpLabel, shareEip.Spec.V4ip) - ginkgo.By("Check share snat should has the external ip label") - framework.ExpectHaveKeyWithValue(shareSnat.Labels, util.EipV4IpLabel, shareEip.Spec.V4ip) - ginkgo.By("Check share fip should ok should has the external ip label") - framework.ExpectHaveKeyWithValue(shareFipShouldOk.Labels, util.EipV4IpLabel, shareEip.Spec.V4ip) - ginkgo.By("Check share fip should fail should not be ready") - framework.ExpectEqual(shareFipShouldFail.Status.Ready, false) - - // make sure eip is shared - nats := []string{util.DnatUsingEip, util.FipUsingEip, util.SnatUsingEip} - framework.ExpectEqual(shareEip.Status.Nat, strings.Join(nats, ",")) - - ginkgo.By("Deleting share iptables fip " + sharedEipFipShouldOkName) - iptablesFIPClient.DeleteSync(sharedEipFipShouldOkName) - ginkgo.By("Deleting share iptables fip " + sharedEipFipShouldFailName) - iptablesFIPClient.DeleteSync(sharedEipFipShouldFailName) - ginkgo.By("Deleting share iptables dnat " + dnatName) - iptablesDnatRuleClient.DeleteSync(dnatName) - ginkgo.By("Deleting share iptables snat " + 
snatName)
-	iptablesSnatRuleClient.DeleteSync(snatName)
-
-	ginkgo.By("Deleting iptables fip " + fipName)
-	iptablesFIPClient.DeleteSync(fipName)
-	ginkgo.By("Deleting iptables dnat " + dnatName)
-	iptablesDnatRuleClient.DeleteSync(dnatName)
-	ginkgo.By("Deleting iptables snat " + snatName)
-	iptablesSnatRuleClient.DeleteSync(snatName)
-
-	ginkgo.By("Deleting iptables eip " + fipEipName)
-	iptablesEIPClient.DeleteSync(fipEipName)
-	ginkgo.By("Deleting iptables eip " + dnatEipName)
-	iptablesEIPClient.DeleteSync(dnatEipName)
-	ginkgo.By("Deleting iptables eip " + snatEipName)
-	iptablesEIPClient.DeleteSync(snatEipName)
-	ginkgo.By("Deleting iptables share eip " + sharedEipName)
-	iptablesEIPClient.DeleteSync(sharedEipName)
-
-	ginkgo.By("Deleting vip " + fipVipName)
-	vipClient.DeleteSync(fipVipName)
-	ginkgo.By("Deleting vip " + dnatVipName)
-	vipClient.DeleteSync(dnatVipName)
-	ginkgo.By("Deleting vip " + sharedVipName)
-	vipClient.DeleteSync(sharedVipName)
-
-	ginkgo.By("Deleting custom vpc " + vpcName)
-	vpcClient.DeleteSync(vpcName)
-
-	ginkgo.By("Deleting custom vpc nat gw")
-	vpcNatGwClient.DeleteSync(vpcNatGwName)
-
-	// the only pod for vpc nat gateway
-	vpcNatGwPodName := util.GenNatGwPodName(vpcNatGwName)
-
-	// delete vpc nat gw statefulset remaining ip for eth0 and net1
-	overlaySubnet := subnetClient.Get(overlaySubnetName)
-	macvlanSubnet := subnetClient.Get(networkAttachDefName)
-	eth0IpName := ovs.PodNameToPortName(vpcNatGwPodName, framework.KubeOvnNamespace, overlaySubnet.Spec.Provider)
-	net1IpName := ovs.PodNameToPortName(vpcNatGwPodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
-	ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
-	ipClient.DeleteSync(eth0IpName)
-	ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
-	ipClient.DeleteSync(net1IpName)
-
-	ginkgo.By("Deleting overlay subnet " + overlaySubnetName)
-	subnetClient.DeleteSync(overlaySubnetName)
-
-	// multiple external network case
-	net2OverlaySubnetV4Cidr := "10.0.1.0/24"
-	net2OverlaySubnetV4Gw := "10.0.1.1"
-	net2LanIP := "10.0.1.254"
-	natgwQoS = ""
-	setupVpcNatGwTestEnvironment(
-		f, dockerExtNet2Network, attachNetClient,
-		subnetClient, vpcClient, vpcNatGwClient,
-		net2VpcName, net2OverlaySubnetName, net2VpcNatGwName, natgwQoS,
-		net2OverlaySubnetV4Cidr, net2OverlaySubnetV4Gw, net2LanIP,
-		dockerExtNet2Name, net2AttachDefName, net2NicName,
-		net2SubnetProvider,
-		false,
-	)
-
-	ginkgo.By("Creating iptables eip of net2")
-	net2Eip := framework.MakeIptablesEIP(net2EipName, "", "", "", net2VpcNatGwName, net2AttachDefName, "")
-	_ = iptablesEIPClient.CreateSync(net2Eip)
-
-	ginkgo.By("Deleting iptables eip " + net2EipName)
-	iptablesEIPClient.DeleteSync(net2EipName)
-
-	ginkgo.By("Deleting custom vpc " + net2VpcName)
-	vpcClient.DeleteSync(net2VpcName)
-
-	ginkgo.By("Deleting custom vpc nat gw")
-	vpcNatGwClient.DeleteSync(net2VpcNatGwName)
-
-	// the only pod for vpc nat gateway
-	vpcNatGwPodName = util.GenNatGwPodName(net2VpcNatGwName)
-
-	// delete vpc nat gw statefulset remaining ip for eth0 and net2
-	overlaySubnet = subnetClient.Get(net2OverlaySubnetName)
-	macvlanSubnet = subnetClient.Get(net2AttachDefName)
-	eth0IpName = ovs.PodNameToPortName(vpcNatGwPodName, framework.KubeOvnNamespace, overlaySubnet.Spec.Provider)
-	net2IpName := ovs.PodNameToPortName(vpcNatGwPodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
-	ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
-	ipClient.DeleteSync(eth0IpName)
-	ginkgo.By("Deleting vpc nat gw net2 ip " + net2IpName)
-	
ipClient.DeleteSync(net2IpName) - - ginkgo.By("Deleting macvlan underlay subnet " + net2AttachDefName) - subnetClient.DeleteSync(net2AttachDefName) - - ginkgo.By("Deleting overlay subnet " + net2OverlaySubnetName) - subnetClient.DeleteSync(net2OverlaySubnetName) - }) -}) - -func iperf(f *framework.Framework, iperfClientPod *corev1.Pod, iperfServerEIP *apiv1.IptablesEIP) string { - ginkgo.GinkgoHelper() - - for i := 0; i < 20; i++ { - command := fmt.Sprintf("iperf -e -p %s --reportstyle C -i 1 -c %s -t 10", iperf2Port, iperfServerEIP.Status.IP) - stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, iperfClientPod.Namespace, iperfClientPod.Name, command) - framework.Logf("output from exec on client pod %s (eip %s)\n", iperfClientPod.Name, iperfServerEIP.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - return stdOutput - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s, retried %d times.", command, err, errOutput, stdOutput, i) - time.Sleep(6 * time.Second) - } - framework.ExpectNoError(errors.New("iperf failed")) - return "" -} - -func checkQos(f *framework.Framework, - vpc1Pod, vpc2Pod *corev1.Pod, vpc1EIP, vpc2EIP *apiv1.IptablesEIP, - limit int, expect bool, -) { - ginkgo.GinkgoHelper() - - if !skipIperf { - if expect { - output := iperf(f, vpc1Pod, vpc2EIP) - framework.ExpectTrue(validRateLimit(output, limit)) - output = iperf(f, vpc2Pod, vpc1EIP) - framework.ExpectTrue(validRateLimit(output, limit)) - } else { - output := iperf(f, vpc1Pod, vpc2EIP) - framework.ExpectFalse(validRateLimit(output, limit)) - output = iperf(f, vpc2Pod, vpc1EIP) - framework.ExpectFalse(validRateLimit(output, limit)) - } - } -} - -func newVPCQoSParamsInit() *qosParams { - qosParams := &qosParams{ - vpc1Name: "qos-vpc1-" + framework.RandomSuffix(), - vpc2Name: "qos-vpc2-" + framework.RandomSuffix(), - vpc1SubnetName: "qos-vpc1-subnet-" + framework.RandomSuffix(), - vpc2SubnetName: "qos-vpc2-subnet-" + framework.RandomSuffix(), - vpcNat1GwName: "qos-vpc1-gw-" + framework.RandomSuffix(), - vpcNat2GwName: "qos-vpc2-gw-" + framework.RandomSuffix(), - vpc1EIPName: "qos-vpc1-eip-" + framework.RandomSuffix(), - vpc2EIPName: "qos-vpc2-eip-" + framework.RandomSuffix(), - vpc1FIPName: "qos-vpc1-fip-" + framework.RandomSuffix(), - vpc2FIPName: "qos-vpc2-fip-" + framework.RandomSuffix(), - vpc1PodName: "qos-vpc1-pod-" + framework.RandomSuffix(), - vpc2PodName: "qos-vpc2-pod-" + framework.RandomSuffix(), - attachDefName: "qos-ovn-vpc-external-network-" + framework.RandomSuffix(), - } - qosParams.subnetProvider = fmt.Sprintf("%s.%s", qosParams.attachDefName, framework.KubeOvnNamespace) - return qosParams -} - -func getNicDefaultQoSPolicy(limit int) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - &apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-ingress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 3, - Direction: apiv1.DirectionIngress, - }, - &apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-egress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 3, - Direction: apiv1.DirectionEgress, - }, - } -} - -func getEIPQoSRule(limit int) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - &apiv1.QoSPolicyBandwidthLimitRule{ - Name: "eip-ingress", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 1, - Direction: apiv1.DirectionIngress, - }, - 
&apiv1.QoSPolicyBandwidthLimitRule{ - Name: "eip-egress", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 1, - Direction: apiv1.DirectionEgress, - }, - } -} - -func getSpecialQoSRule(limit int, ip string) apiv1.QoSPolicyBandwidthLimitRules { - return apiv1.QoSPolicyBandwidthLimitRules{ - &apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-extip-ingress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 2, - Direction: apiv1.DirectionIngress, - MatchType: apiv1.MatchTypeIP, - MatchValue: "src " + ip + "/32", - }, - &apiv1.QoSPolicyBandwidthLimitRule{ - Name: "net1-extip-egress", - Interface: "net1", - RateMax: strconv.Itoa(limit), - BurstMax: strconv.Itoa(limit), - Priority: 2, - Direction: apiv1.DirectionEgress, - MatchType: apiv1.MatchTypeIP, - MatchValue: "dst " + ip + "/32", - }, - } -} - -// defaultQoSCases test default qos policy= -func defaultQoSCases(f *framework.Framework, - vpcNatGwClient *framework.VpcNatGatewayClient, - podClient *framework.PodClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - natgwName string, -) { - ginkgo.GinkgoHelper() - - // create nic qos policy - qosPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + qosPolicyName) - rules := getNicDefaultQoSPolicy(defaultNicLimit) - - qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules) - _ = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Delete natgw pod " + natgwName + "-0") - natGwPodName := util.GenNatGwPodName(natgwName) - podClient.DeleteSync(natGwPodName) - - ginkgo.By("Wait for natgw " + natgwName + "qos rebuild") - time.Sleep(5 * time.Second) - - ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true) - - ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName) - _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "") - - ginkgo.By("Deleting qos policy " + qosPolicyName) - qosPolicyClient.DeleteSync(qosPolicyName) - - ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps") - checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false) -} - -// eipQoSCases test default qos policy -func eipQoSCases(f *framework.Framework, - eipClient *framework.IptablesEIPClient, - podClient *framework.PodClient, - qosPolicyClient *framework.QoSPolicyClient, - vpc1Pod *corev1.Pod, - vpc2Pod *corev1.Pod, - vpc1EIP *apiv1.IptablesEIP, - vpc2EIP *apiv1.IptablesEIP, - eipName string, - natgwName string, -) { - ginkgo.GinkgoHelper() - - // create eip qos policy - qosPolicyName := "eip-qos-policy-" + framework.RandomSuffix() - ginkgo.By("Creating qos policy " + qosPolicyName) - rules := getEIPQoSRule(eipLimit) - - qosPolicy := framework.MakeQoSPolicy(qosPolicyName, false, apiv1.QoSBindingTypeEIP, rules) - qosPolicy = qosPolicyClient.CreateSync(qosPolicy) - - ginkgo.By("Patch eip " + eipName + " with qos policy " + qosPolicyName) - _ = 
-
-    ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
-
-    ginkgo.By("Update qos policy " + qosPolicyName + " with new rate limit")
-
-    rules = getEIPQoSRule(updatedEIPLimit)
-    modifiedQoSPolicy := qosPolicy.DeepCopy()
-    modifiedQoSPolicy.Spec.BandwidthLimitRules = rules
-    qosPolicyClient.Patch(qosPolicy, modifiedQoSPolicy)
-    qosPolicyClient.WaitToQoSReady(qosPolicyName)
-
-    ginkgo.By("Check qos " + qosPolicyName + " is changed to " + strconv.Itoa(updatedEIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
-
-    ginkgo.By("Delete natgw pod " + natgwName + "-0")
-    natGwPodName := util.GenNatGwPodName(natgwName)
-    podClient.DeleteSync(natGwPodName)
-
-    ginkgo.By("Wait for natgw " + natgwName + " qos rebuild")
-    time.Sleep(5 * time.Second)
-
-    ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(updatedEIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, updatedEIPLimit, true)
-
-    newQoSPolicyName := "new-eip-qos-policy-" + framework.RandomSuffix()
-    newRules := getEIPQoSRule(newEIPLimit)
-    newQoSPolicy := framework.MakeQoSPolicy(newQoSPolicyName, false, apiv1.QoSBindingTypeEIP, newRules)
-    _ = qosPolicyClient.CreateSync(newQoSPolicy)
-
-    ginkgo.By("Change qos policy of eip " + eipName + " to " + newQoSPolicyName)
-    _ = eipClient.PatchQoSPolicySync(eipName, newQoSPolicyName)
-
-    ginkgo.By("Check qos " + newQoSPolicyName + " is limited to " + strconv.Itoa(newEIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, true)
-
-    ginkgo.By("Remove qos policy " + newQoSPolicyName + " from eip " + eipName)
-    _ = eipClient.PatchQoSPolicySync(eipName, "")
-
-    ginkgo.By("Deleting qos policy " + qosPolicyName)
-    qosPolicyClient.DeleteSync(qosPolicyName)
-
-    ginkgo.By("Deleting qos policy " + newQoSPolicyName)
-    qosPolicyClient.DeleteSync(newQoSPolicyName)
-
-    ginkgo.By("Check qos " + newQoSPolicyName + " is not limited to " + strconv.Itoa(newEIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, newEIPLimit, false)
-}
-
-// specifyingIPQoSCases tests the qos policy that matches a specific external ip
-func specifyingIPQoSCases(f *framework.Framework,
-    vpcNatGwClient *framework.VpcNatGatewayClient,
-    qosPolicyClient *framework.QoSPolicyClient,
-    vpc1Pod *corev1.Pod,
-    vpc2Pod *corev1.Pod,
-    vpc1EIP *apiv1.IptablesEIP,
-    vpc2EIP *apiv1.IptablesEIP,
-    natgwName string,
-) {
-    ginkgo.GinkgoHelper()
-
-    // create nic qos policy
-    qosPolicyName := "specifying-ip-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + qosPolicyName)
-
-    rules := getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)
-
-    qosPolicy := framework.MakeQoSPolicy(qosPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
-    _ = qosPolicyClient.CreateSync(qosPolicy)
-
-    ginkgo.By("Patch natgw " + natgwName + " with qos policy " + qosPolicyName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, qosPolicyName)
-
-    ginkgo.By("Check qos " + qosPolicyName + " is limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true)
-
-    ginkgo.By("Remove qos policy " + qosPolicyName + " from natgw " + natgwName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
-
-    ginkgo.By("Deleting qos policy " + qosPolicyName)
-    qosPolicyClient.DeleteSync(qosPolicyName)
-
-    ginkgo.By("Check qos " + qosPolicyName + " is not limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, false)
-}
-
-// priorityQoSCases tests qos rule priority matching
-func priorityQoSCases(f *framework.Framework,
-    vpcNatGwClient *framework.VpcNatGatewayClient,
-    eipClient *framework.IptablesEIPClient,
-    qosPolicyClient *framework.QoSPolicyClient,
-    vpc1Pod *corev1.Pod,
-    vpc2Pod *corev1.Pod,
-    vpc1EIP *apiv1.IptablesEIP,
-    vpc2EIP *apiv1.IptablesEIP,
-    natgwName string,
-    eipName string,
-) {
-    ginkgo.GinkgoHelper()
-
-    // create nic qos policy
-    natGwQoSPolicyName := "priority-nic-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + natGwQoSPolicyName)
-    // default qos policy + special qos policy
-    natgwRules := getNicDefaultQoSPolicy(defaultNicLimit)
-    natgwRules = append(natgwRules, getSpecialQoSRule(specificIPLimit, vpc2EIP.Status.IP)...)
-
-    natgwQoSPolicy := framework.MakeQoSPolicy(natGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, natgwRules)
-    _ = qosPolicyClient.CreateSync(natgwQoSPolicy)
-
-    ginkgo.By("Patch natgw " + natgwName + " with qos policy " + natGwQoSPolicyName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, natGwQoSPolicyName)
-
-    eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + eipQoSPolicyName)
-    eipRules := getEIPQoSRule(eipLimit)
-
-    eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, eipRules)
-    _ = qosPolicyClient.CreateSync(eipQoSPolicy)
-
-    ginkgo.By("Patch eip " + eipName + " with qos policy " + eipQoSPolicyName)
-    _ = eipClient.PatchQoSPolicySync(eipName, eipQoSPolicyName)
-
-    // match qos of priority 1
-    ginkgo.By("Check qos to match priority 1 is limited to " + strconv.Itoa(eipLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
-
-    ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from eip " + eipName)
-    _ = eipClient.PatchQoSPolicySync(eipName, "")
-
-    ginkgo.By("Deleting qos policy " + eipQoSPolicyName)
-    qosPolicyClient.DeleteSync(eipQoSPolicyName)
-
-    // match qos of priority 2
-    ginkgo.By("Check qos to match priority 2 is limited to " + strconv.Itoa(specificIPLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, specificIPLimit, true)
-
-    // change qos policy of natgw
-    newNatGwQoSPolicyName := "new-priority-nic-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + newNatGwQoSPolicyName)
-    newNatgwRules := getNicDefaultQoSPolicy(defaultNicLimit)
-
-    newNatgwQoSPolicy := framework.MakeQoSPolicy(newNatGwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, newNatgwRules)
-    _ = qosPolicyClient.CreateSync(newNatgwQoSPolicy)
-
-    ginkgo.By("Change qos policy of natgw " + natgwName + " to " + newNatGwQoSPolicyName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, newNatGwQoSPolicyName)
-
-    // match qos of priority 3
-    ginkgo.By("Check qos to match priority 3 is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
-
-    ginkgo.By("Remove qos policy " + newNatGwQoSPolicyName + " from natgw " + natgwName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
-
-    ginkgo.By("Deleting qos policy " + natGwQoSPolicyName)
-    qosPolicyClient.DeleteSync(natGwQoSPolicyName)
-
-    ginkgo.By("Deleting qos policy " + newNatGwQoSPolicyName)
-    qosPolicyClient.DeleteSync(newNatGwQoSPolicyName)
-
-    ginkgo.By("Check qos " + natGwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
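-    // expect=false verifies the measured rate no longer falls in the ±20% band around the old limit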
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
-}
-
-func createNatGwAndSetQosCases(f *framework.Framework,
-    vpcNatGwClient *framework.VpcNatGatewayClient,
-    ipClient *framework.IPClient,
-    eipClient *framework.IptablesEIPClient,
-    fipClient *framework.IptablesFIPClient,
-    subnetClient *framework.SubnetClient,
-    qosPolicyClient *framework.QoSPolicyClient,
-    vpc1Pod *corev1.Pod,
-    vpc2Pod *corev1.Pod,
-    vpc2EIP *apiv1.IptablesEIP,
-    natgwName string,
-    eipName string,
-    fipName string,
-    vpcName string,
-    overlaySubnetName string,
-    lanIP string,
-    attachDefName string,
-) {
-    ginkgo.GinkgoHelper()
-
-    // delete fip
-    ginkgo.By("Deleting fip " + fipName)
-    fipClient.DeleteSync(fipName)
-
-    ginkgo.By("Deleting eip " + eipName)
-    eipClient.DeleteSync(eipName)
-
-    // the only pod for vpc nat gateway
-    vpcNatGw1PodName := util.GenNatGwPodName(natgwName)
-
-    // delete the ips left by the vpc nat gw statefulset for eth0 and net1
-    ginkgo.By("Deleting custom vpc nat gw " + natgwName)
-    vpcNatGwClient.DeleteSync(natgwName)
-
-    overlaySubnet1 := subnetClient.Get(overlaySubnetName)
-    macvlanSubnet := subnetClient.Get(attachDefName)
-    eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
-    net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
-    ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName)
-    ipClient.DeleteSync(eth0IpName)
-    ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName)
-    ipClient.DeleteSync(net1IpName)
-
-    natgwQoSPolicyName := "default-nic-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + natgwQoSPolicyName)
-    rules := getNicDefaultQoSPolicy(defaultNicLimit)
-
-    qosPolicy := framework.MakeQoSPolicy(natgwQoSPolicyName, true, apiv1.QoSBindingTypeNatGw, rules)
-    _ = qosPolicyClient.CreateSync(qosPolicy)
-
-    ginkgo.By("Creating custom vpc nat gw")
-    vpcNatGw := framework.MakeVpcNatGateway(natgwName, vpcName, overlaySubnetName, lanIP, attachDefName, natgwQoSPolicyName)
-    _ = vpcNatGwClient.CreateSync(vpcNatGw, f.ClientSet)
-
-    eipQoSPolicyName := "eip-qos-policy-" + framework.RandomSuffix()
-    ginkgo.By("Creating qos policy " + eipQoSPolicyName)
-    rules = getEIPQoSRule(eipLimit)
-
-    eipQoSPolicy := framework.MakeQoSPolicy(eipQoSPolicyName, false, apiv1.QoSBindingTypeEIP, rules)
-    _ = qosPolicyClient.CreateSync(eipQoSPolicy)
-
-    ginkgo.By("Creating eip " + eipName)
-    vpc1EIP := framework.MakeIptablesEIP(eipName, "", "", "", natgwName, attachDefName, eipQoSPolicyName)
-    vpc1EIP = eipClient.CreateSync(vpc1EIP)
-
-    ginkgo.By("Creating fip " + fipName)
-    fip := framework.MakeIptablesFIPRule(fipName, eipName, vpc1Pod.Status.PodIP)
-    _ = fipClient.CreateSync(fip)
-
-    ginkgo.By("Check qos " + eipQoSPolicyName + " is limited to " + strconv.Itoa(eipLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, eipLimit, true)
-
-    ginkgo.By("Remove qos policy " + eipQoSPolicyName + " from eip " + eipName)
-    _ = eipClient.PatchQoSPolicySync(eipName, "")
-
-    ginkgo.By("Check qos " + natgwQoSPolicyName + " is limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, true)
-
-    ginkgo.By("Remove qos policy " + natgwQoSPolicyName + " from natgw " + natgwName)
-    _ = vpcNatGwClient.PatchQoSPolicySync(natgwName, "")
-
-    ginkgo.By("Check qos " + natgwQoSPolicyName + " is not limited to " + strconv.Itoa(defaultNicLimit) + "Mbps")
-    checkQos(f, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, defaultNicLimit, false)
-
-    ginkgo.By("Deleting qos policy " + natgwQoSPolicyName)
-    qosPolicyClient.DeleteSync(natgwQoSPolicyName)
-
-    ginkgo.By("Deleting qos policy " + eipQoSPolicyName)
-    qosPolicyClient.DeleteSync(eipQoSPolicyName)
-}
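-
-// validRateLimit scans iperf CSV output ("--reportstyle C"); the last comma-separated
-// field of each record is the measured throughput in bits/s, and a run passes when
-// any sample falls within ±20% of the requested limit.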
-func validRateLimit(text string, limit int) bool {
-    maxValue := float64(limit) * 1024 * 1024 * 1.2
-    minValue := float64(limit) * 1024 * 1024 * 0.8
-    lines := strings.Split(text, "\n")
-    for _, line := range lines {
-        if line == "" {
-            continue
-        }
-        fields := strings.Split(line, ",")
-        number, err := strconv.Atoi(fields[len(fields)-1])
-        if err != nil {
-            continue
-        }
-        if v := float64(number); v >= minValue && v <= maxValue {
-            return true
-        }
-    }
-    return false
-}
-
-var _ = framework.Describe("[group:qos-policy]", func() {
-    f := framework.NewDefaultFramework("qos-policy")
-
-    var skip bool
-    var cs clientset.Interface
-    var attachNetClient *framework.NetworkAttachmentDefinitionClient
-    var clusterName string
-    var vpcClient *framework.VpcClient
-    var vpcNatGwClient *framework.VpcNatGatewayClient
-    var subnetClient *framework.SubnetClient
-    var podClient *framework.PodClient
-    var ipClient *framework.IPClient
-    var iptablesEIPClient *framework.IptablesEIPClient
-    var iptablesFIPClient *framework.IptablesFIPClient
-    var qosPolicyClient *framework.QoSPolicyClient
-
-    var net1NicName string
-    var dockerExtNetName string
-
-    // docker network
-    var dockerExtNetNetwork *dockernetwork.Inspect
-
-    var vpcQosParams *qosParams
-    var vpc1Pod *corev1.Pod
-    var vpc2Pod *corev1.Pod
-    var vpc1EIP *apiv1.IptablesEIP
-    var vpc2EIP *apiv1.IptablesEIP
-    var vpc1FIP *apiv1.IptablesFIPRule
-    var vpc2FIP *apiv1.IptablesFIPRule
-
-    var lanIP string
-    var overlaySubnetV4Cidr string
-    var overlaySubnetV4Gw string
-    var eth0Exist, net1Exist bool
-    var annotations1 map[string]string
-    var annotations2 map[string]string
-    var iperfServerCmd []string
-
-    ginkgo.BeforeEach(func() {
-        vpcQosParams = newVPCQoSParamsInit()
-
-        dockerExtNetName = "kube-ovn-qos-" + framework.RandomSuffix()
-
-        vpcQosParams.vpc1SubnetName = "qos-vpc1-subnet-" + framework.RandomSuffix()
-        vpcQosParams.vpc2SubnetName = "qos-vpc2-subnet-" + framework.RandomSuffix()
-
-        vpcQosParams.vpcNat1GwName = "qos-gw1-" + framework.RandomSuffix()
-        vpcQosParams.vpcNat2GwName = "qos-gw2-" + framework.RandomSuffix()
-
-        vpcQosParams.vpc1EIPName = "qos-vpc1-eip-" + framework.RandomSuffix()
-        vpcQosParams.vpc2EIPName = "qos-vpc2-eip-" + framework.RandomSuffix()
-
-        vpcQosParams.vpc1FIPName = "qos-vpc1-fip-" + framework.RandomSuffix()
-        vpcQosParams.vpc2FIPName = "qos-vpc2-fip-" + framework.RandomSuffix()
-
-        vpcQosParams.vpc1PodName = "qos-vpc1-pod-" + framework.RandomSuffix()
-        vpcQosParams.vpc2PodName = "qos-vpc2-pod-" + framework.RandomSuffix()
-
-        vpcQosParams.attachDefName = "qos-ovn-vpc-external-network-" + framework.RandomSuffix()
-        vpcQosParams.subnetProvider = fmt.Sprintf("%s.%s", vpcQosParams.attachDefName, framework.KubeOvnNamespace)
-
-        cs = f.ClientSet
-        podClient = f.PodClient()
-        attachNetClient = f.NetworkAttachmentDefinitionClientNS(framework.KubeOvnNamespace)
-        subnetClient = f.SubnetClient()
-        vpcClient = f.VpcClient()
-        vpcNatGwClient = f.VpcNatGatewayClient()
-        iptablesEIPClient = f.IptablesEIPClient()
-        ipClient = f.IPClient()
-        iptablesFIPClient = f.IptablesFIPClient()
-        qosPolicyClient = f.QoSPolicyClient()
-
-        if skip {
-            ginkgo.Skip("qos-policy spec only runs on kind clusters")
-        }
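-
-        // this suite wires kind nodes into extra docker networks, so it can only run against a kind-provisioned cluster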
-        if clusterName == "" {
-            ginkgo.By("Getting k8s nodes")
-            k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs)
-            framework.ExpectNoError(err)
-
-            cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID)
-            if !ok {
-                skip = true
-                ginkgo.Skip("qos-policy spec only runs on kind clusters")
-            }
-            clusterName = cluster
-        }
-
-        ginkgo.By("Ensuring docker network " + dockerExtNetName + " exists")
-        network, err := docker.NetworkCreate(dockerExtNetName, true, true)
-        framework.ExpectNoError(err, "creating docker network "+dockerExtNetName)
-        dockerExtNetNetwork = network
-
-        ginkgo.By("Getting kind nodes")
-        nodes, err := kind.ListNodes(clusterName, "")
-        framework.ExpectNoError(err, "getting nodes in kind cluster")
-        framework.ExpectNotEmpty(nodes)
-
-        ginkgo.By("Connecting nodes to the docker network")
-        err = kind.NetworkConnect(dockerExtNetNetwork.ID, nodes)
-        framework.ExpectNoError(err, "connecting nodes to network "+dockerExtNetName)
-
-        ginkgo.By("Getting node links that belong to the docker network")
-        nodes, err = kind.ListNodes(clusterName, "")
-        framework.ExpectNoError(err, "getting nodes in kind cluster")
-
-        ginkgo.By("Validating node links")
-        network1, err := docker.NetworkInspect(dockerExtNetName)
-        framework.ExpectNoError(err)
-        for _, node := range nodes {
-            links, err := node.ListLinks()
-            framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err)
-            net1Mac := network1.Containers[node.ID].MacAddress
-            for _, link := range links {
-                ginkgo.By("found node nic " + link.IfName)
-                if link.IfName == "eth0" {
-                    eth0Exist = true
-                }
-                if link.Address == net1Mac {
-                    net1NicName = link.IfName
-                    net1Exist = true
-                }
-            }
-            framework.ExpectTrue(eth0Exist)
-            framework.ExpectTrue(net1Exist)
-        }
-        setupNetworkAttachmentDefinition(
-            f, dockerExtNetNetwork, attachNetClient,
-            subnetClient, vpcQosParams.attachDefName, net1NicName, vpcQosParams.subnetProvider, dockerExtNetName)
-    })
-
-    ginkgo.AfterEach(func() {
-        ginkgo.By("Deleting macvlan underlay subnet " + vpcQosParams.attachDefName)
-        subnetClient.DeleteSync(vpcQosParams.attachDefName)
-
-        // delete net1 attachment definition
-        ginkgo.By("Deleting nad " + vpcQosParams.attachDefName)
-        attachNetClient.Delete(vpcQosParams.attachDefName)
-
-        ginkgo.By("Getting nodes")
-        nodes, err := kind.ListNodes(clusterName, "")
-        framework.ExpectNoError(err, "getting nodes in cluster")
-
-        if dockerExtNetNetwork != nil {
-            ginkgo.By("Disconnecting nodes from the docker network")
-            err = kind.NetworkDisconnect(dockerExtNetNetwork.ID, nodes)
-            framework.ExpectNoError(err, "disconnecting nodes from network "+dockerExtNetName)
-            ginkgo.By("Deleting docker network " + dockerExtNetName)
-            err := docker.NetworkRemove(dockerExtNetNetwork.ID)
-            framework.ExpectNoError(err, "deleting docker network "+dockerExtNetName)
-        }
-    })
-
-    _ = framework.Describe("vpc qos", func() {
-        ginkgo.BeforeEach(func() {
-            iperfServerCmd = []string{"iperf", "-s", "-i", "1", "-p", iperf2Port}
-            overlaySubnetV4Cidr = "10.0.0.0/24"
-            overlaySubnetV4Gw = "10.0.0.1"
-            lanIP = "10.0.0.254"
-            natgwQoS := ""
-            setupVpcNatGwTestEnvironment(
-                f, dockerExtNetNetwork, attachNetClient,
-                subnetClient, vpcClient, vpcNatGwClient,
-                vpcQosParams.vpc1Name, vpcQosParams.vpc1SubnetName, vpcQosParams.vpcNat1GwName,
-                natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP,
-                dockerExtNetName, vpcQosParams.attachDefName, net1NicName,
-                vpcQosParams.subnetProvider,
-                true,
-            )
-            annotations1 = map[string]string{
-                util.LogicalSwitchAnnotation: vpcQosParams.vpc1SubnetName,
-            }
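-            // each vpc gets one iperf server pod; the peer vpc reaches it through the eip/fip created below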
-            ginkgo.By("Creating pod " + vpcQosParams.vpc1PodName)
-            vpc1Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc1PodName, nil, annotations1, framework.AgnhostImage, iperfServerCmd, nil)
-            vpc1Pod = podClient.CreateSync(vpc1Pod)
-
-            ginkgo.By("Creating eip " + vpcQosParams.vpc1EIPName)
-            vpc1EIP = framework.MakeIptablesEIP(vpcQosParams.vpc1EIPName, "", "", "", vpcQosParams.vpcNat1GwName, vpcQosParams.attachDefName, "")
-            vpc1EIP = iptablesEIPClient.CreateSync(vpc1EIP)
-
-            ginkgo.By("Creating fip " + vpcQosParams.vpc1FIPName)
-            vpc1FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc1FIPName, vpcQosParams.vpc1EIPName, vpc1Pod.Status.PodIP)
-            _ = iptablesFIPClient.CreateSync(vpc1FIP)
-
-            setupVpcNatGwTestEnvironment(
-                f, dockerExtNetNetwork, attachNetClient,
-                subnetClient, vpcClient, vpcNatGwClient,
-                vpcQosParams.vpc2Name, vpcQosParams.vpc2SubnetName, vpcQosParams.vpcNat2GwName,
-                natgwQoS, overlaySubnetV4Cidr, overlaySubnetV4Gw, lanIP,
-                dockerExtNetName, vpcQosParams.attachDefName, net1NicName,
-                vpcQosParams.subnetProvider,
-                true,
-            )
-
-            annotations2 = map[string]string{
-                util.LogicalSwitchAnnotation: vpcQosParams.vpc2SubnetName,
-            }
-
-            ginkgo.By("Creating pod " + vpcQosParams.vpc2PodName)
-            vpc2Pod = framework.MakePod(f.Namespace.Name, vpcQosParams.vpc2PodName, nil, annotations2, framework.AgnhostImage, iperfServerCmd, nil)
-            vpc2Pod = podClient.CreateSync(vpc2Pod)
-
-            ginkgo.By("Creating eip " + vpcQosParams.vpc2EIPName)
-            vpc2EIP = framework.MakeIptablesEIP(vpcQosParams.vpc2EIPName, "", "", "", vpcQosParams.vpcNat2GwName, vpcQosParams.attachDefName, "")
-            vpc2EIP = iptablesEIPClient.CreateSync(vpc2EIP)
-
-            ginkgo.By("Creating fip " + vpcQosParams.vpc2FIPName)
-            vpc2FIP = framework.MakeIptablesFIPRule(vpcQosParams.vpc2FIPName, vpcQosParams.vpc2EIPName, vpc2Pod.Status.PodIP)
-            _ = iptablesFIPClient.CreateSync(vpc2FIP)
-        })
-        ginkgo.AfterEach(func() {
-            ginkgo.By("Deleting fip " + vpcQosParams.vpc1FIPName)
-            iptablesFIPClient.DeleteSync(vpcQosParams.vpc1FIPName)
-
-            ginkgo.By("Deleting fip " + vpcQosParams.vpc2FIPName)
-            iptablesFIPClient.DeleteSync(vpcQosParams.vpc2FIPName)
-
-            ginkgo.By("Deleting eip " + vpcQosParams.vpc1EIPName)
-            iptablesEIPClient.DeleteSync(vpcQosParams.vpc1EIPName)
-
-            ginkgo.By("Deleting eip " + vpcQosParams.vpc2EIPName)
-            iptablesEIPClient.DeleteSync(vpcQosParams.vpc2EIPName)
-
-            ginkgo.By("Deleting pod " + vpcQosParams.vpc1PodName)
-            podClient.DeleteSync(vpcQosParams.vpc1PodName)
-
-            ginkgo.By("Deleting pod " + vpcQosParams.vpc2PodName)
-            podClient.DeleteSync(vpcQosParams.vpc2PodName)
-
-            ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc1Name)
-            vpcClient.DeleteSync(vpcQosParams.vpc1Name)
-
-            ginkgo.By("Deleting custom vpc " + vpcQosParams.vpc2Name)
-            vpcClient.DeleteSync(vpcQosParams.vpc2Name)
-
-            ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat1GwName)
-            vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat1GwName)
-
-            ginkgo.By("Deleting custom vpc nat gw " + vpcQosParams.vpcNat2GwName)
-            vpcNatGwClient.DeleteSync(vpcQosParams.vpcNat2GwName)
-
-            // the only pod for vpc nat gateway
-            vpcNatGw1PodName := util.GenNatGwPodName(vpcQosParams.vpcNat1GwName)
-
-            // delete the ips left by the vpc nat gw statefulset for eth0 and net1
-            overlaySubnet1 := subnetClient.Get(vpcQosParams.vpc1SubnetName)
-            macvlanSubnet := subnetClient.Get(vpcQosParams.attachDefName)
-            eth0IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, overlaySubnet1.Spec.Provider)
-            net1IpName := ovs.PodNameToPortName(vpcNatGw1PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider)
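-            // ovs.PodNameToPortName derives each IP CR name from the gateway pod name and the subnet's provider
-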
ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName) - ipClient.DeleteSync(eth0IpName) - ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName) - ipClient.DeleteSync(net1IpName) - ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc1SubnetName) - subnetClient.DeleteSync(vpcQosParams.vpc1SubnetName) - - ginkgo.By("Getting overlay subnet " + vpcQosParams.vpc2SubnetName) - overlaySubnet2 := subnetClient.Get(vpcQosParams.vpc2SubnetName) - - vpcNatGw2PodName := util.GenNatGwPodName(vpcQosParams.vpcNat2GwName) - eth0IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, overlaySubnet2.Spec.Provider) - net1IpName = ovs.PodNameToPortName(vpcNatGw2PodName, framework.KubeOvnNamespace, macvlanSubnet.Spec.Provider) - ginkgo.By("Deleting vpc nat gw eth0 ip " + eth0IpName) - ipClient.DeleteSync(eth0IpName) - ginkgo.By("Deleting vpc nat gw net1 ip " + net1IpName) - ipClient.DeleteSync(net1IpName) - ginkgo.By("Deleting overlay subnet " + vpcQosParams.vpc2SubnetName) - subnetClient.DeleteSync(vpcQosParams.vpc2SubnetName) - }) - framework.ConformanceIt("default nic qos", func() { - // case 1: set qos policy for natgw - // case 2: rebuild qos when natgw pod restart - defaultQoSCases(f, vpcNatGwClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("eip qos", func() { - // case 1: set qos policy for eip - // case 2: update qos policy for eip - // case 3: change qos policy of eip - // case 4: rebuild qos when natgw pod restart - eipQoSCases(f, iptablesEIPClient, podClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpc1EIPName, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("specifying ip qos", func() { - // case 1: set specific ip qos policy for natgw - specifyingIPQoSCases(f, vpcNatGwClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName) - }) - framework.ConformanceIt("qos priority matching", func() { - // case 1: test qos match priority - // case 2: change qos policy of natgw - priorityQoSCases(f, vpcNatGwClient, iptablesEIPClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc1EIP, vpc2EIP, vpcQosParams.vpcNat1GwName, vpcQosParams.vpc1EIPName) - }) - framework.ConformanceIt("create resource with qos policy", func() { - // case 1: test qos when create natgw with qos policy - // case 2: test qos when create eip with qos policy - createNatGwAndSetQosCases(f, - vpcNatGwClient, ipClient, iptablesEIPClient, iptablesFIPClient, - subnetClient, qosPolicyClient, vpc1Pod, vpc2Pod, vpc2EIP, vpcQosParams.vpcNat1GwName, - vpcQosParams.vpc1EIPName, vpcQosParams.vpc1FIPName, vpcQosParams.vpc1Name, - vpcQosParams.vpc1SubnetName, lanIP, vpcQosParams.attachDefName) - }) - }) -}) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. 
- config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/k8s-network/e2e_test.go b/test/e2e/k8s-network/e2e_test.go deleted file mode 100644 index e96516204c6..00000000000 --- a/test/e2e/k8s-network/e2e_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package k8s_network - -import ( - "flag" - "testing" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - // Import tests. - _ "k8s.io/kubernetes/test/e2e/network" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - framework.AfterReadingAllFlags(&framework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/kube-ovn/e2e_test.go b/test/e2e/kube-ovn/e2e_test.go deleted file mode 100644 index 903a6d66e1b..00000000000 --- a/test/e2e/kube-ovn/e2e_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package kube_ovn - -import ( - "flag" - "testing" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - // Import tests. - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/ipam" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/kubectl-ko" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/network-policy" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/node" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/pod" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/qos" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/service" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/subnet" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/switch_lb_rule" - _ "github.com/kubeovn/kube-ovn/test/e2e/kube-ovn/underlay" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. 
- config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - framework.AfterReadingAllFlags(&framework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/kube-ovn/ipam/ipam.go b/test/e2e/kube-ovn/ipam/ipam.go deleted file mode 100644 index 4c8ae27d23c..00000000000 --- a/test/e2e/kube-ovn/ipam/ipam.go +++ /dev/null @@ -1,512 +0,0 @@ -package ipam - -import ( - "context" - "fmt" - "strings" - "time" - - clientset "k8s.io/client-go/kubernetes" - e2epod "k8s.io/kubernetes/test/e2e/framework/pod" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/ipam" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:ipam]", func() { - f := framework.NewDefaultFramework("ipam") - - var cs clientset.Interface - var nsClient *framework.NamespaceClient - var podClient *framework.PodClient - var deployClient *framework.DeploymentClient - var stsClient *framework.StatefulSetClient - var subnetClient *framework.SubnetClient - var ippoolClient *framework.IPPoolClient - var namespaceName, subnetName, ippoolName, podName, deployName, stsName string - var subnet *apiv1.Subnet - var cidr string - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - nsClient = f.NamespaceClient() - podClient = f.PodClient() - deployClient = f.DeploymentClient() - stsClient = f.StatefulSetClient() - subnetClient = f.SubnetClient() - ippoolClient = f.IPPoolClient() - namespaceName = f.Namespace.Name - subnetName = "subnet-" + framework.RandomSuffix() - ippoolName = "ippool-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - deployName = "deploy-" + framework.RandomSuffix() - stsName = "sts-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting deployment " + deployName) - deployClient.DeleteSync(deployName) - - ginkgo.By("Deleting statefulset " + stsName) - stsClient.DeleteSync(stsName) - - ginkgo.By("Deleting ippool " + ippoolName) - ippoolClient.DeleteSync(ippoolName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("should allocate static ipv4 and mac for pod", func() { - mac := util.GenerateMac() - ip := framework.RandomIPs(cidr, ";", 1) - - ginkgo.By("Creating pod " + podName + " with ip " + ip + " and mac " + mac) - annotations := map[string]string{ - util.IPAddressAnnotation: ip, - util.MacAddressAnnotation: mac, - } - pod := framework.MakePod(namespaceName, podName, nil, annotations, "", nil, nil) - pod = podClient.CreateSync(pod) - - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPAddressAnnotation, ip) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - 
framework.ExpectHaveKeyWithValue(pod.Annotations, util.MacAddressAnnotation, mac) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - - framework.ExpectConsistOf(util.PodIPs(*pod), strings.Split(ip, ",")) - }) - - framework.ConformanceIt("should allocate static ip for pod with comma separated ippool", func() { - if f.IsDual() { - ginkgo.Skip("Comma separated ippool is not supported for dual stack") - } - - pool := framework.RandomIPs(cidr, ",", 3) - ginkgo.By("Creating pod " + podName + " with ippool " + pool) - annotations := map[string]string{util.IPPoolAnnotation: pool} - pod := framework.MakePod(namespaceName, podName, nil, annotations, "", nil, nil) - pod = podClient.CreateSync(pod) - - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, pool) - framework.ExpectEqual(pod.Annotations[util.IPAddressAnnotation], pod.Status.PodIP) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectContainElement(strings.Split(pool, ","), pod.Status.PodIP) - }) - - framework.ConformanceIt("should allocate static ip for deployment with ippool", func() { - ippoolSep := ";" - if f.VersionPriorTo(1, 11) { - if f.IsDual() { - ginkgo.Skip("Support for dual stack ippool was introduced in v1.11") - } - ippoolSep = "," - } - - replicas := 3 - ippool := framework.RandomIPs(cidr, ippoolSep, replicas) - - ginkgo.By("Creating deployment " + deployName + " with ippool " + ippool) - labels := map[string]string{"app": deployName} - annotations := map[string]string{util.IPPoolAnnotation: ippool} - deploy := framework.MakeDeployment(deployName, int32(replicas), labels, annotations, "pause", framework.PauseImage, "") - deploy = deployClient.CreateSync(deploy) - - ginkgo.By("Getting pods for deployment " + deployName) - pods, err := deployClient.GetPods(deploy) - framework.ExpectNoError(err, "failed to get pods for deployment "+deployName) - framework.ExpectHaveLen(pods.Items, replicas) - - ips := strings.Split(ippool, ippoolSep) - for _, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectContainElement(ips, pod.Annotations[util.IPAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - } - - ginkgo.By("Deleting pods for deployment " + deployName) - for _, pod := range pods.Items { - err = podClient.Delete(pod.Name) - framework.ExpectNoError(err, "failed to delete pod "+pod.Name) - } - err = deployClient.WaitToComplete(deploy) - 
framework.ExpectNoError(err) - - ginkgo.By("Waiting for new pods to be ready") - err = e2epod.WaitForPodsRunningReady(context.Background(), cs, namespaceName, *deploy.Spec.Replicas, 0, time.Minute) - framework.ExpectNoError(err, "timed out waiting for pods to be ready") - - ginkgo.By("Getting pods for deployment " + deployName + " after deletion") - pods, err = deployClient.GetPods(deploy) - framework.ExpectNoError(err, "failed to get pods for deployment "+deployName) - framework.ExpectHaveLen(pods.Items, replicas) - for _, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectContainElement(ips, pod.Annotations[util.IPAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - } - }) - - framework.ConformanceIt("should allocate static ip for statefulset", func() { - replicas := 3 - labels := map[string]string{"app": stsName} - - ginkgo.By("Creating statefulset " + stsName) - sts := framework.MakeStatefulSet(stsName, stsName, int32(replicas), labels, framework.PauseImage) - sts = stsClient.CreateSync(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods := stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - ips := make([]string, 0, replicas) - for _, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - ips = append(ips, pod.Annotations[util.IPAddressAnnotation]) - } - - ginkgo.By("Deleting pods for statefulset " + stsName) - for _, pod := range pods.Items { - err := podClient.Delete(pod.Name) - framework.ExpectNoError(err, "failed to delete pod "+pod.Name) - } - stsClient.WaitForRunningAndReady(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods = stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - for i, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPAddressAnnotation, ips[i]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - 
framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - } - }) - - framework.ConformanceIt("should allocate static ip for statefulset with ippool", func() { - ippoolSep := ";" - if f.VersionPriorTo(1, 11) { - if f.IsDual() { - ginkgo.Skip("Support for dual stack ippool was introduced in v1.11") - } - ippoolSep = "," - } - - for replicas := 1; replicas <= 3; replicas++ { - stsName = "sts-" + framework.RandomSuffix() - ippool := framework.RandomIPs(cidr, ippoolSep, replicas) - labels := map[string]string{"app": stsName} - - ginkgo.By("Creating statefulset " + stsName + " with ippool " + ippool) - sts := framework.MakeStatefulSet(stsName, stsName, int32(replicas), labels, framework.PauseImage) - sts.Spec.Template.Annotations = map[string]string{util.IPPoolAnnotation: ippool} - sts = stsClient.CreateSync(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods := stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - ips := make([]string, 0, replicas) - for _, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - ips = append(ips, pod.Annotations[util.IPAddressAnnotation]) - } - framework.ExpectConsistOf(ips, strings.Split(ippool, ippoolSep)) - - ginkgo.By("Deleting pods for statefulset " + stsName) - for _, pod := range pods.Items { - err := podClient.Delete(pod.Name) - framework.ExpectNoError(err, "failed to delete pod "+pod.Name) - } - stsClient.WaitForRunningAndReady(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods = stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - for i, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPAddressAnnotation, ips[i]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - } - - ginkgo.By("Deleting statefulset " + stsName) - stsClient.DeleteSync(stsName) - } - }) - - // separate ippool annotation by comma - framework.ConformanceIt("should allocate static ip for statefulset with ippool separated by comma", func() { - if f.IsDual() { - ginkgo.Skip("Comma separated ippool is not supported for dual stack") - } - - ippoolSep := "," - replicas := 3 - ippool := framework.RandomIPs(cidr, ippoolSep, 
replicas) - labels := map[string]string{"app": stsName} - - ginkgo.By("Creating statefulset " + stsName + " with ippool " + ippool) - sts := framework.MakeStatefulSet(stsName, stsName, int32(replicas), labels, framework.PauseImage) - sts.Spec.Template.Annotations = map[string]string{util.IPPoolAnnotation: ippool} - sts = stsClient.CreateSync(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods := stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - ips := make([]string, 0, replicas) - for _, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - ips = append(ips, pod.Annotations[util.IPAddressAnnotation]) - } - framework.ExpectConsistOf(ips, strings.Split(ippool, ippoolSep)) - - ginkgo.By("Deleting pods for statefulset " + stsName) - for _, pod := range pods.Items { - err := podClient.Delete(pod.Name) - framework.ExpectNoError(err, "failed to delete pod "+pod.Name) - } - stsClient.WaitForRunningAndReady(sts) - - ginkgo.By("Getting pods for statefulset " + stsName) - pods = stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - for i, pod := range pods.Items { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPPoolAnnotation, ippool) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IPAddressAnnotation, ips[i]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectConsistOf(util.PodIPs(pod), strings.Split(pod.Annotations[util.IPAddressAnnotation], ",")) - } - }) - - framework.ConformanceIt("should support IPPool feature", func() { - f.SkipVersionPriorTo(1, 12, "Support for IPPool feature was introduced in v1.12") - - ipsCount := 12 - ips := framework.RandomIPPool(cidr, ipsCount) - ipv4, ipv6 := util.SplitIpsByProtocol(ips) - if f.HasIPv4() { - framework.ExpectHaveLen(ipv4, ipsCount) - } - if f.HasIPv6() { - framework.ExpectHaveLen(ipv6, ipsCount) - } - - ipv4Range, err := ipam.NewIPRangeListFrom(ipv4...) - framework.ExpectNoError(err) - ipv6Range, err := ipam.NewIPRangeListFrom(ipv6...) - framework.ExpectNoError(err) - - excludeV4, excludeV6 := util.SplitIpsByProtocol(subnet.Spec.ExcludeIps) - excludeV4Range, err := ipam.NewIPRangeListFrom(excludeV4...) - framework.ExpectNoError(err) - excludeV6Range, err := ipam.NewIPRangeListFrom(excludeV6...) 
- framework.ExpectNoError(err) - - ipv4Range = ipv4Range.Separate(excludeV4Range) - ipv6Range = ipv6Range.Separate(excludeV6Range) - - ginkgo.By(fmt.Sprintf("Creating ippool %s with ips %v", ippoolName, ips)) - ippool := framework.MakeIPPool(ippoolName, subnetName, ips, nil) - ippool = ippoolClient.CreateSync(ippool) - - ginkgo.By("Validating ippool status") - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - if !ippool.Status.V4UsingIPs.EqualInt64(0) { - framework.Logf("unexpected .status.v4UsingIPs: %s", ippool.Status.V4UsingIPs) - return false, nil - } - if !ippool.Status.V6UsingIPs.EqualInt64(0) { - framework.Logf("unexpected .status.v6UsingIPs: %s", ippool.Status.V6UsingIPs) - return false, nil - } - if ippool.Status.V4UsingIPRange != "" { - framework.Logf("unexpected .status.v4UsingIPRange: %s", ippool.Status.V4UsingIPRange) - return false, nil - } - if ippool.Status.V6UsingIPRange != "" { - framework.Logf("unexpected .status.v6UsingIPRange: %s", ippool.Status.V6UsingIPRange) - return false, nil - } - if !ippool.Status.V4AvailableIPs.Equal(ipv4Range.Count()) { - framework.Logf(".status.v4AvailableIPs mismatch: expect %s, actual %s", ipv4Range.Count(), ippool.Status.V4AvailableIPs) - return false, nil - } - if !ippool.Status.V6AvailableIPs.Equal(ipv6Range.Count()) { - framework.Logf(".status.v6AvailableIPs mismatch: expect %s, actual %s", ipv6Range.Count(), ippool.Status.V6AvailableIPs) - return false, nil - } - if ippool.Status.V4AvailableIPRange != ipv4Range.String() { - framework.Logf(".status.v4AvailableIPRange mismatch: expect %s, actual %s", ipv4Range, ippool.Status.V4AvailableIPRange) - return false, nil - } - if ippool.Status.V6AvailableIPRange != ipv6Range.String() { - framework.Logf(".status.v6AvailableIPRange mismatch: expect %s, actual %s", ipv6Range, ippool.Status.V6AvailableIPRange) - return false, nil - } - return true, nil - }, "") - - ginkgo.By("Creating deployment " + deployName + " within ippool " + ippoolName) - replicas := 3 - labels := map[string]string{"app": deployName} - annotations := map[string]string{util.IPPoolAnnotation: ippoolName} - deploy := framework.MakeDeployment(deployName, int32(replicas), labels, annotations, "pause", framework.PauseImage, "") - deploy = deployClient.CreateSync(deploy) - - checkFn := func() { - ginkgo.GinkgoHelper() - - ginkgo.By("Getting pods for deployment " + deployName) - pods, err := deployClient.GetPods(deploy) - framework.ExpectNoError(err, "failed to get pods for deployment "+deployName) - framework.ExpectHaveLen(pods.Items, replicas) - - v4Using, v6Using := ipam.NewEmptyIPRangeList(), ipam.NewEmptyIPRangeList() - for _, pod := range pods.Items { - for _, podIP := range pod.Status.PodIPs { - ip, err := ipam.NewIP(podIP.IP) - framework.ExpectNoError(err) - if strings.ContainsRune(podIP.IP, ':') { - framework.ExpectTrue(ipv6Range.Contains(ip), "Pod IP %s should be contained by %v", ip.String(), ipv6Range.String()) - v6Using.Add(ip) - } else { - framework.ExpectTrue(ipv4Range.Contains(ip), "Pod IP %s should be contained by %v", ip.String(), ipv4Range.String()) - v4Using.Add(ip) - } - } - } - - ginkgo.By("Validating ippool status") - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - ippool = ippoolClient.Get(ippoolName) - v4Available, v6Available := ipv4Range.Separate(v4Using), ipv6Range.Separate(v6Using) - if !ippool.Status.V4UsingIPs.Equal(v4Using.Count()) { - framework.Logf(".status.v4UsingIPs mismatch: expect %s, actual %s", 
v4Using.Count(), ippool.Status.V4UsingIPs) - return false, nil - } - if !ippool.Status.V6UsingIPs.Equal(v6Using.Count()) { - framework.Logf(".status.v6UsingIPs mismatch: expect %s, actual %s", v6Using.Count(), ippool.Status.V6UsingIPs) - return false, nil - } - if ippool.Status.V4UsingIPRange != v4Using.String() { - framework.Logf(".status.v4UsingIPRange mismatch: expect %s, actual %s", v4Using, ippool.Status.V4UsingIPRange) - return false, nil - } - if ippool.Status.V6UsingIPRange != v6Using.String() { - framework.Logf(".status.v6UsingIPRange mismatch: expect %s, actual %s", v6Using, ippool.Status.V6UsingIPRange) - return false, nil - } - if !ippool.Status.V4AvailableIPs.Equal(v4Available.Count()) { - framework.Logf(".status.v4AvailableIPs mismatch: expect %s, actual %s", v4Available.Count(), ippool.Status.V4AvailableIPs) - return false, nil - } - if !ippool.Status.V6AvailableIPs.Equal(v6Available.Count()) { - framework.Logf(".status.v6AvailableIPs mismatch: expect %s, actual %s", v6Available.Count(), ippool.Status.V6AvailableIPs) - return false, nil - } - if ippool.Status.V4AvailableIPRange != v4Available.String() { - framework.Logf(".status.v4AvailableIPRange mismatch: expect %s, actual %s", v4Available, ippool.Status.V4AvailableIPRange) - return false, nil - } - if ippool.Status.V6AvailableIPRange != v6Available.String() { - framework.Logf(".status.v6AvailableIPRange mismatch: expect %s, actual %s", v6Available, ippool.Status.V6AvailableIPRange) - return false, nil - } - return true, nil - }, "") - } - checkFn() - - ginkgo.By("Restarting deployment " + deployName) - deploy = deployClient.RestartSync(deploy) - checkFn() - - ginkgo.By("Adding namespace " + namespaceName + " to ippool " + ippoolName) - patchedIPPool := ippool.DeepCopy() - patchedIPPool.Spec.Namespaces = []string{namespaceName} - ippool = ippoolClient.Patch(ippool, patchedIPPool, 10*time.Second) - - ginkgo.By("Validating namespace annotations") - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - ns := nsClient.Get(namespaceName) - return len(ns.Annotations) != 0 && ns.Annotations[util.IPPoolAnnotation] == ippoolName, nil - }, "") - - ginkgo.By("Patching deployment " + deployName) - deploy = deployClient.RestartSync(deploy) - patchedDeploy := deploy.DeepCopy() - patchedDeploy.Spec.Template.Annotations = nil - deploy = deployClient.PatchSync(deploy, patchedDeploy) - checkFn() - }) -}) diff --git a/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go b/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go deleted file mode 100644 index fde5248f7e2..00000000000 --- a/test/e2e/kube-ovn/kubectl-ko/kubectl-ko.go +++ /dev/null @@ -1,253 +0,0 @@ -package kubectl_ko - -import ( - "context" - "fmt" - "math/rand/v2" - "strings" - - clientset "k8s.io/client-go/kubernetes" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -const ( - targetIPv4 = "8.8.8.8" - targetIPv6 = "2001:4860:4860::8888" -) - -func execOrDie(cmd string) { - ginkgo.GinkgoHelper() - ginkgo.By(`Executing "kubectl ` + cmd + `"`) - e2ekubectl.NewKubectlCommand("", strings.Fields(cmd)...).ExecOrDie("") -} - -var _ = framework.Describe("[group:kubectl-ko]", func() { - f := framework.NewDefaultFramework("kubectl-ko") - - var cs clientset.Interface - 
var podClient *framework.PodClient - var namespaceName, podName, kubectlConfig string - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - kubectlConfig = k8sframework.TestContext.KubeConfig - k8sframework.TestContext.KubeConfig = "" - }) - ginkgo.AfterEach(func() { - k8sframework.TestContext.KubeConfig = kubectlConfig - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - }) - - framework.ConformanceIt(`should support "kubectl ko nbctl show"`, func() { - execOrDie("ko nbctl show") - }) - - framework.ConformanceIt(`should support "kubectl ko sbctl show"`, func() { - execOrDie("ko sbctl show") - }) - - framework.ConformanceIt(`should support "kubectl ko vsctl show"`, func() { - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - for _, node := range nodeList.Items { - execOrDie(fmt.Sprintf("ko vsctl %s show", node.Name)) - } - }) - - framework.ConformanceIt(`should support "kubectl ko ofctl show br-int"`, func() { - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - for _, node := range nodeList.Items { - execOrDie(fmt.Sprintf("ko ofctl %s show br-int", node.Name)) - } - }) - - framework.ConformanceIt(`should support "kubectl ko dpctl show"`, func() { - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - for _, node := range nodeList.Items { - execOrDie(fmt.Sprintf("ko dpctl %s show", node.Name)) - } - }) - - framework.ConformanceIt(`should support "kubectl ko appctl list-commands"`, func() { - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - for _, node := range nodeList.Items { - execOrDie(fmt.Sprintf("ko appctl %s list-commands", node.Name)) - } - }) - - framework.ConformanceIt(`should support "kubectl ko nb/sb status/backup"`, func() { - databases := [...]string{"nb", "sb"} - actions := [...]string{"status", "backup"} - for _, db := range databases { - for _, action := range actions { - execOrDie(fmt.Sprintf("ko %s %s", db, action)) - // TODO: verify backup files are present - } - } - }) - - framework.ConformanceIt(`should support "kubectl ko tcpdump -c1"`, func() { - ping, target := "ping", targetIPv4 - if f.IsIPv6() { - ping, target = "ping6", targetIPv6 - } - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", fmt.Sprintf(`while true; do %s -c1 -w1 %s; sleep 1; done`, ping, target)} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - execOrDie(fmt.Sprintf("ko tcpdump %s/%s -c1", pod.Namespace, pod.Name)) - }) - - framework.ConformanceIt(`should support "kubectl ko trace "`, func() { - ginkgo.By("Creating pod " + podName) - pod := framework.MakePod(namespaceName, podName, nil, nil, "", nil, nil) - pod = podClient.CreateSync(pod) - - supportARP := !f.VersionPriorTo(1, 11) - supportDstMAC := !f.VersionPriorTo(1, 10) - if !supportARP { - framework.Logf("Support for ARP was introduced in v1.11") - } - if !supportDstMAC { - framework.Logf("Support for destination MAC was introduced in v1.10") - } - - for _, ip := range pod.Status.PodIPs { - target, testARP := targetIPv4, supportARP - if util.CheckProtocol(ip.IP) == 
apiv1.ProtocolIPv6 { - target, testARP = targetIPv6, false - } - - targetMAC := util.GenerateMac() - prefix := fmt.Sprintf("ko trace %s/%s %s", pod.Namespace, pod.Name, target) - if testARP { - execOrDie(fmt.Sprintf("%s %s arp reply", prefix, targetMAC)) - } - - targetMACs := []string{"", targetMAC} - for _, mac := range targetMACs { - if mac != "" && !supportDstMAC { - continue - } - if testARP { - execOrDie(fmt.Sprintf("%s %s arp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s arp request", prefix, mac)) - } - execOrDie(fmt.Sprintf("%s %s icmp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s tcp 80", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s udp 53", prefix, mac)) - } - } - }) - - framework.ConformanceIt(`should support "kubectl ko trace " for pod with host network`, func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - - ginkgo.By("Creating pod " + podName + " with host network") - pod := framework.MakePod(namespaceName, podName, nil, nil, "", nil, nil) - pod.Spec.HostNetwork = true - pod = podClient.CreateSync(pod) - - for _, ip := range pod.Status.PodIPs { - target, testARP := targetIPv4, true - if util.CheckProtocol(ip.IP) == apiv1.ProtocolIPv6 { - target, testARP = targetIPv6, false - } - - targetMAC := util.GenerateMac() - prefix := fmt.Sprintf("ko trace %s/%s %s", pod.Namespace, pod.Name, target) - if testARP { - execOrDie(fmt.Sprintf("%s %s arp reply", prefix, targetMAC)) - } - - targetMACs := []string{"", targetMAC} - for _, mac := range targetMACs { - if testARP { - execOrDie(fmt.Sprintf("%s %s arp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s arp request", prefix, mac)) - } - execOrDie(fmt.Sprintf("%s %s icmp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s tcp 80", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s udp 53", prefix, mac)) - } - } - }) - - framework.ConformanceIt(`should support "kubectl ko trace "`, func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotNil(nodeList) - framework.ExpectNotEmpty(nodeList.Items) - node := nodeList.Items[rand.IntN(len(nodeList.Items))] - - nodeIPv4, nodeIPv6 := util.GetNodeInternalIP(node) - for _, ip := range []string{nodeIPv4, nodeIPv6} { - if ip == "" { - continue - } - target, testARP := targetIPv4, true - if util.CheckProtocol(ip) == apiv1.ProtocolIPv6 { - target, testARP = targetIPv6, false - } - - targetMAC := util.GenerateMac() - prefix := fmt.Sprintf("ko trace node//%s %s", node.Name, target) - if testARP { - execOrDie(fmt.Sprintf("%s %s arp reply", prefix, targetMAC)) - } - - targetMACs := []string{"", targetMAC} - for _, mac := range targetMACs { - if testARP { - execOrDie(fmt.Sprintf("%s %s arp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s arp request", prefix, mac)) - } - execOrDie(fmt.Sprintf("%s %s icmp", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s tcp 80", prefix, mac)) - execOrDie(fmt.Sprintf("%s %s udp 53", prefix, mac)) - } - } - }) - - framework.ConformanceIt(`should support "kubectl ko log kube-ovn all"`, func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - components := [...]string{"kube-ovn", "ovn", "ovs", "linux", "all"} - for _, component := range components { - execOrDie(fmt.Sprintf("ko log %s", component)) - } - }) - - framework.ConformanceIt(`should support "kubectl ko diagnose subnet IPPorts "`, func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in 
v1.12") - execOrDie("ko diagnose subnet ovn-default") - execOrDie("ko diagnose IPPorts tcp-114.114.114.114-53,udp-114.114.114.114-53") - }) -}) diff --git a/test/e2e/kube-ovn/network-policy/network-policy.go b/test/e2e/kube-ovn/network-policy/network-policy.go deleted file mode 100644 index 8669d7fe097..00000000000 --- a/test/e2e/kube-ovn/network-policy/network-policy.go +++ /dev/null @@ -1,185 +0,0 @@ -package network_policy - -import ( - "context" - "fmt" - "math/rand/v2" - "net" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - netv1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.SerialDescribe("[group:network-policy]", func() { - f := framework.NewDefaultFramework("network-policy") - - var subnet *apiv1.Subnet - var cs clientset.Interface - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var defaultServiceClient *framework.ServiceClient - var netpolClient *framework.NetworkPolicyClient - var daemonSetClient *framework.DaemonSetClient - var namespaceName, netpolName, subnetName, podName string - var cidr string - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - subnetClient = f.SubnetClient() - defaultServiceClient = f.ServiceClientNS("default") - netpolClient = f.NetworkPolicyClient() - daemonSetClient = f.DaemonSetClientNS(framework.KubeOvnNamespace) - namespaceName = f.Namespace.Name - netpolName = "netpol-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - - ginkgo.By("Deleting network policy " + netpolName) - netpolClient.DeleteSync(netpolName) - }) - - framework.ConformanceIt("should be able to access pods from node after creating a network policy with empty ingress rules", func() { - ginkgo.By("Creating network policy " + netpolName) - netpol := &netv1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: netpolName, - }, - Spec: netv1.NetworkPolicySpec{ - Ingress: []netv1.NetworkPolicyIngressRule{}, - }, - } - _ = netpolClient.Create(netpol) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - port := strconv.Itoa(8000 + rand.IntN(1000)) - args := []string{"netexec", "--http-port", port} - annotations := map[string]string{util.LogicalSwitchAnnotation: subnetName} - pod := framework.MakePod(namespaceName, podName, nil, annotations, framework.AgnhostImage, nil, args) - pod = podClient.CreateSync(pod) - - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodeList.Items) - - ginkgo.By("Getting daemonset kube-ovn-cni") - ds := daemonSetClient.Get("kube-ovn-cni") - - ginkgo.By("Getting kube-ovn-cni pods") - pods := make([]corev1.Pod, 0, 
len(nodeList.Items)) - for _, node := range nodeList.Items { - pod, err := daemonSetClient.GetPodOnNode(ds, node.Name) - framework.ExpectNoError(err, "failed to get kube-ovn-cni pod running on node %s", node.Name) - pods = append(pods, *pod) - } - - for _, podIP := range pod.Status.PodIPs { - ip := podIP.IP - protocol := strings.ToLower(util.CheckProtocol(ip)) - cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s", net.JoinHostPort(ip, port)) - - var podSameNode *corev1.Pod - for _, hostPod := range pods { - nodeName := hostPod.Spec.NodeName - if nodeName == pod.Spec.NodeName { - podSameNode = hostPod.DeepCopy() - continue - } - - ginkgo.By("Checking connection from node " + nodeName + " to " + podName + " via " + protocol) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, hostPod.Namespace, hostPod.Name)) - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - _, err := e2epodoutput.RunHostCmd(hostPod.Namespace, hostPod.Name, cmd) - return err != nil, nil - }, "") - } - - ginkgo.By("Checking connection from node " + podSameNode.Spec.NodeName + " to " + podName + " via " + protocol) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, podSameNode.Namespace, podSameNode.Name)) - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - _, err := e2epodoutput.RunHostCmd(podSameNode.Namespace, podSameNode.Name, cmd) - return err == nil, nil - }, "") - - // check one more time - for _, hostPod := range pods { - nodeName := hostPod.Spec.NodeName - if nodeName == pod.Spec.NodeName { - continue - } - - ginkgo.By("Checking connection from node " + nodeName + " to " + podName + " via " + protocol) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, hostPod.Namespace, hostPod.Name)) - _, err := e2epodoutput.RunHostCmd(hostPod.Namespace, hostPod.Name, cmd) - framework.ExpectError(err) - } - } - }) - - framework.ConformanceIt("should be able to access svc with backend host network pod after any other ingress network policy rules created", func() { - ginkgo.By("Creating network policy " + netpolName) - netpol := &netv1.NetworkPolicy{ - ObjectMeta: metav1.ObjectMeta{ - Name: netpolName, - }, - Spec: netv1.NetworkPolicySpec{ - Ingress: []netv1.NetworkPolicyIngressRule{ - { - From: []netv1.NetworkPolicyPeer{ - { - PodSelector: nil, - NamespaceSelector: nil, - IPBlock: &netv1.IPBlock{CIDR: "0.0.0.0/0", Except: []string{"127.0.0.1/32"}}, - }, - }, - }, - }, - PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, - }, - } - _ = netpolClient.Create(netpol) - - ginkgo.By("Creating pod " + podName) - pod := framework.MakePod(namespaceName, podName, nil, nil, framework.AgnhostImage, nil, nil) - pod = podClient.CreateSync(pod) - - svc := defaultServiceClient.Get("kubernetes") - clusterIP := svc.Spec.ClusterIP - - ginkgo.By("Checking connection from pod " + podName + " to " + clusterIP + " via TCP") - - cmd := fmt.Sprintf("curl -k -q -s --connect-timeout 2 https://%s", net.JoinHostPort(clusterIP, "443")) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, pod.Namespace, pod.Name)) - - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - _, err := e2epodoutput.RunHostCmd(pod.Namespace, pod.Name, cmd) - return err == nil, nil - }, "") - }) -}) diff --git a/test/e2e/kube-ovn/node/node.go b/test/e2e/kube-ovn/node/node.go deleted file mode 100644 index a72d9107200..00000000000 --- a/test/e2e/kube-ovn/node/node.go +++ /dev/null @@ -1,294 +0,0 @@ -package node - -import ( - 
"context" - "fmt" - "math/rand/v2" - "net" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - clientset "k8s.io/client-go/kubernetes" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" -) - -var _ = framework.OrderedDescribe("[group:node]", func() { - f := framework.NewDefaultFramework("node") - - var subnet *apiv1.Subnet - var cs clientset.Interface - var podClient *framework.PodClient - var serviceClient *framework.ServiceClient - var subnetClient *framework.SubnetClient - var podName, hostPodName, serviceName, namespaceName, subnetName string - var cidr string - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - serviceClient = f.ServiceClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - hostPodName = "pod-" + framework.RandomSuffix() - serviceName = "service-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting service " + serviceName) - serviceClient.DeleteSync(serviceName) - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting pod " + hostPodName) - podClient.DeleteSync(hostPodName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("should allocate ip in join subnet to node", func() { - ginkgo.By("Getting join subnet") - join := subnetClient.Get("join") - - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - ginkgo.By("Validating node annotations") - for _, node := range nodeList.Items { - framework.ExpectHaveKeyWithValue(node.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectUUID(node.Annotations[util.ChassisAnnotation]) - framework.ExpectHaveKeyWithValue(node.Annotations, util.CidrAnnotation, join.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(node.Annotations, util.GatewayAnnotation, join.Spec.Gateway) - framework.ExpectIPInCIDR(node.Annotations[util.IPAddressAnnotation], join.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(node.Annotations, util.LogicalSwitchAnnotation, join.Name) - framework.ExpectMAC(node.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(node.Annotations, util.PortNameAnnotation, util.NodeLspName(node.Name)) - - podName = "pod-" + framework.RandomSuffix() - ginkgo.By("Creating pod " + podName + " with host network on node " + node.Name) - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - pod.Spec.NodeName = node.Name - pod.Spec.HostNetwork = true - pod = podClient.CreateSync(pod) - - ginkgo.By("Checking ip addresses on " + util.NodeNic) - links, err := iproute.AddressShow(util.NodeNic, func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(pod.Namespace, pod.Name, cmd...) 
- }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(links, 1) - ipCIDRs, err := util.GetIPAddrWithMask(node.Annotations[util.IPAddressAnnotation], join.Spec.CIDRBlock) - framework.ExpectNoError(err) - framework.Logf("node %q join ip address with prefix: %q", node.Name, ipCIDRs) - ips := strings.Split(ipCIDRs, ",") - framework.ExpectConsistOf(links[0].NonLinkLocalAddresses(), ips) - - err = podClient.Delete(podName) - framework.ExpectNoError(err) - } - }) - - framework.ConformanceIt("should access overlay pods using node ip", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - port := strconv.Itoa(8000 + rand.IntN(1000)) - args := []string{"netexec", "--http-port", port} - pod := framework.MakePod(namespaceName, podName, nil, annotations, framework.AgnhostImage, nil, args) - pod = podClient.CreateSync(pod) - - ginkgo.By("Creating pod " + hostPodName + " with host network") - cmd := []string{"sh", "-c", "sleep infinity"} - hostPod := framework.MakePod(namespaceName, hostPodName, nil, nil, f.KubeOVNImage, cmd, nil) - hostPod.Spec.HostNetwork = true - hostPod = podClient.CreateSync(hostPod) - - ginkgo.By("Validating client ip") - nodeIPs := util.PodIPs(*hostPod) - for _, podIP := range pod.Status.PodIPs { - ip := podIP.IP - protocol := strings.ToLower(util.CheckProtocol(ip)) - ginkgo.By("Checking connection from " + hostPodName + " to " + podName + " via " + protocol) - cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(ip, port)) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, namespaceName, hostPodName)) - output := e2epodoutput.RunHostCmdOrDie(namespaceName, hostPodName, cmd) - client, _, err := net.SplitHostPort(strings.TrimSpace(output)) - framework.ExpectNoError(err) - framework.ExpectContainElement(nodeIPs, client) - } - }) - - framework.ConformanceIt("should access overlay services using node ip", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - podLabels := map[string]string{"app": podName} - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - port := 8000 + rand.Int32N(1000) - portStr := strconv.Itoa(int(port)) - args := []string{"netexec", "--http-port", portStr} - pod := framework.MakePod(namespaceName, podName, podLabels, annotations, framework.AgnhostImage, nil, args) - _ = podClient.CreateSync(pod) - - ginkgo.By("Creating service " + serviceName) - ports := []corev1.ServicePort{{ - Name: "tcp", - Protocol: corev1.ProtocolTCP, - Port: port, - TargetPort: intstr.FromInt32(port), - }} - service := framework.MakeService(serviceName, "", nil, podLabels, ports, "") - _ = serviceClient.CreateSync(service, func(s *corev1.Service) (bool, error) { - return len(s.Spec.ClusterIPs) != 0, nil - }, "cluster ips are not empty") - - ginkgo.By("Creating pod " + hostPodName + " with host network") - cmd := []string{"sh", "-c", "sleep infinity"} - hostPod := framework.MakePod(namespaceName, hostPodName, nil, nil, f.KubeOVNImage, cmd, nil) - 
hostPod.Spec.HostNetwork = true - hostPod = podClient.CreateSync(hostPod) - - ginkgo.By("Validating client ip") - nodeIPs := util.PodIPs(*hostPod) - service = serviceClient.Get(serviceName) - for _, ip := range util.ServiceClusterIPs(*service) { - protocol := strings.ToLower(util.CheckProtocol(ip)) - ginkgo.By("Checking connection from " + hostPodName + " to " + serviceName + " via " + protocol) - cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(ip, portStr)) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, namespaceName, hostPodName)) - output := e2epodoutput.RunHostCmdOrDie(namespaceName, hostPodName, cmd) - client, _, err := net.SplitHostPort(strings.TrimSpace(output)) - framework.ExpectNoError(err) - framework.ExpectContainElement(nodeIPs, client) - } - }) -}) - -var _ = framework.SerialDescribe("[group:node]", func() { - f := framework.NewDefaultFramework("node") - - var cs clientset.Interface - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var podName, namespaceName string - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - }) - - framework.ConformanceIt("should add missing routes on node for the join subnet", func() { - f.SkipVersionPriorTo(1, 9, "This feature was introduced in v1.9") - ginkgo.By("Getting join subnet") - join := subnetClient.Get("join") - - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - ginkgo.By("Validating node annotations") - node := nodeList.Items[0] - framework.ExpectHaveKeyWithValue(node.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(node.Annotations, util.CidrAnnotation, join.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(node.Annotations, util.GatewayAnnotation, join.Spec.Gateway) - framework.ExpectIPInCIDR(node.Annotations[util.IPAddressAnnotation], join.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(node.Annotations, util.LogicalSwitchAnnotation, join.Name) - - podName = "pod-" + framework.RandomSuffix() - ginkgo.By("Creating pod " + podName + " with host network") - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePrivilegedPod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - pod.Spec.NodeName = node.Name - pod.Spec.HostNetwork = true - pod = podClient.CreateSync(pod) - - ginkgo.By("Getting node routes on " + util.NodeNic) - cidrs := strings.Split(join.Spec.CIDRBlock, ",") - execFunc := func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(pod.Namespace, pod.Name, cmd...) 
- } - routes, err := iproute.RouteShow("", util.NodeNic, execFunc) - framework.ExpectNoError(err) - found := make([]bool, len(cidrs)) - for i, cidr := range cidrs { - for _, route := range routes { - if route.Dst == cidr { - framework.Logf("Found route for cidr " + cidr + " on " + util.NodeNic) - found[i] = true - break - } - } - } - for i, cidr := range cidrs { - framework.ExpectTrue(found[i], "Route for cidr "+cidr+" not found on "+util.NodeNic) - } - - for _, cidr := range strings.Split(join.Spec.CIDRBlock, ",") { - ginkgo.By("Deleting route for " + cidr + " on node " + node.Name) - err = iproute.RouteDel("", cidr, execFunc) - framework.ExpectNoError(err) - } - - ginkgo.By("Waiting for routes for subnet " + join.Name + " to be created") - framework.WaitUntil(2*time.Second, 10*time.Second, func(_ context.Context) (bool, error) { - if routes, err = iproute.RouteShow("", util.NodeNic, execFunc); err != nil { - return false, err - } - - found = make([]bool, len(cidrs)) - for i, cidr := range cidrs { - for _, route := range routes { - if route.Dst == cidr { - framework.Logf("Found route for cidr " + cidr + " on " + util.NodeNic) - found[i] = true - break - } - } - } - for i, cidr := range cidrs { - if !found[i] { - framework.Logf("Route for cidr " + cidr + " not found on " + util.NodeNic) - return false, nil - } - } - return true, nil - }, "") - - err = podClient.Delete(podName) - framework.ExpectNoError(err) - }) -}) diff --git a/test/e2e/kube-ovn/pod/pod_recreation.go b/test/e2e/kube-ovn/pod/pod_recreation.go deleted file mode 100644 index 923b6bf6a63..00000000000 --- a/test/e2e/kube-ovn/pod/pod_recreation.go +++ /dev/null @@ -1,109 +0,0 @@ -package pod - -import ( - "cmp" - "context" - "strings" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/ovs" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.SerialDescribe("[group:pod]", func() { - f := framework.NewDefaultFramework("pod") - - var podClient *framework.PodClient - var namespaceName, podName string - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - }) - - framework.ConformanceIt("should handle pod creation during kube-ovn-controller is down", func() { - ginkgo.By("Creating pod " + podName) - pod := framework.MakePod(namespaceName, podName, nil, nil, framework.PauseImage, nil, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - mac := pod.Annotations[util.MacAddressAnnotation] - - portName := ovs.PodNameToPortName(podName, pod.Namespace, util.OvnProvider) - ginkgo.By("Getting ips " + portName) - ipClient := f.IPClient() - ip := ipClient.Get(portName) - - ginkgo.By("Validating ips " + ip.Name) - framework.ExpectEqual(ip.Spec.MacAddress, mac) - framework.ExpectEqual(ip.Spec.IPAddress, pod.Annotations[util.IPAddressAnnotation]) - - ginkgo.By("Getting deployment kube-ovn-controller") - deployClient := f.DeploymentClientNS(framework.KubeOvnNamespace) - deploy := deployClient.Get("kube-ovn-controller") - framework.ExpectNotNil(deploy.Spec.Replicas) - - 
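// The steps below exercise IPAM reconciliation with the controller offline:
// kube-ovn-controller is scaled to zero, the pod is deleted and recreated
// while no controller is running, and once the controller is scaled back up
// the recreated pod must be allocated a new MAC while the IP CR bound to the
// same port name is updated to match the new annotations.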
ginkgo.By("Getting kube-ovn-controller pods") - kubePodClient := f.PodClientNS(framework.KubeOvnNamespace) - framework.ExpectNotNil(deploy.Spec.Replicas) - pods, err := kubePodClient.List(context.Background(), metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deploy.Spec.Selector)}) - framework.ExpectNoError(err, "failed to list kube-ovn-controller pods") - framework.ExpectNotNil(pods) - podNames := make([]string, 0, len(pods.Items)) - for _, pod := range pods.Items { - podNames = append(podNames, pod.Name) - } - framework.Logf("Got kube-ovn-controller pods: %s", strings.Join(podNames, ", ")) - - ginkgo.By("Stopping kube-ovn-controller by setting its replicas to zero") - deployClient.SetScale(deploy.Name, 0) - - ginkgo.By("Waiting for kube-ovn-controller pods to disappear") - for _, pod := range podNames { - ginkgo.By("Waiting for pod " + pod + " to disappear") - kubePodClient.WaitForNotFound(pod) - } - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Recreating pod " + podName) - pod = framework.MakePod(namespaceName, podName, nil, nil, framework.PauseImage, nil, nil) - _ = podClient.Create(pod) - - ginkgo.By("Starting kube-ovn-controller by restore its replicas") - deployClient.SetScale(deploy.Name, cmp.Or(*deploy.Spec.Replicas, 1)) - - ginkgo.By("Waiting for kube-ovn-controller to be ready") - _ = deployClient.RolloutStatus(deploy.Name) - - ginkgo.By("Waiting for pod " + podName + " to be running") - podClient.WaitForRunning(podName) - - ginkgo.By("Validating pod annotations") - pod = podClient.GetPod(podName) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectNotEqual(pod.Annotations[util.MacAddressAnnotation], mac) - - ginkgo.By("Getting ips " + portName) - ip = ipClient.Get(portName) - - ginkgo.By("Validating ips " + ip.Name) - framework.ExpectEqual(ip.Spec.MacAddress, pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectEqual(ip.Spec.IPAddress, pod.Annotations[util.IPAddressAnnotation]) - }) -}) diff --git a/test/e2e/kube-ovn/pod/pod_routes.go b/test/e2e/kube-ovn/pod/pod_routes.go deleted file mode 100644 index 53369be4199..00000000000 --- a/test/e2e/kube-ovn/pod/pod_routes.go +++ /dev/null @@ -1,172 +0,0 @@ -package pod - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/onsi/ginkgo/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/request" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" -) - -var _ = framework.SerialDescribe("[group:pod]", func() { - f := framework.NewDefaultFramework("pod") - - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var namespaceName, subnetName, podName string - var cidr string - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - subnetName = "subnet-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) 
- - framework.ConformanceIt("should support north gateway via pod annotation", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - if f.ClusterNetworkMode == "underlay" { - ginkgo.Skip("This test is only for overlay network") - } - - ginkgo.By("Creating pod " + podName + " with north gateway annotation") - northGateway := "100.64.0.100" - ipSuffix := "ip4" - if f.ClusterIPFamily == "ipv6" { - northGateway = "fd00:100:64::100" - ipSuffix = "ip6" - } - - annotations := map[string]string{ - util.NorthGatewayAnnotation: northGateway, - } - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - podIP := pod.Status.PodIP - nbCmd := fmt.Sprintf("ovn-nbctl --format=csv --data=bare --no-heading --columns=match,action,nexthops find logical_router_policy priority=%d", util.NorthGatewayRoutePolicyPriority) - out, _, err := framework.NBExec(nbCmd) - framework.ExpectNoError(err) - framework.ExpectEqual(strings.TrimSpace(string(out)), fmt.Sprintf("%s.src == %s,reroute,%s", ipSuffix, podIP, northGateway)) - - ginkgo.By("Deleting pod " + podName + " with north gateway annotation") - f.PodClientNS(namespaceName).DeleteSync(podName) - framework.WaitUntil(2*time.Second, 2*time.Minute, func(_ context.Context) (bool, error) { - out, _, err = framework.NBExec(nbCmd) - if err == nil && strings.TrimSpace(string(out)) == "" { - return true, nil - } - return false, err - }, "policy has been GCed") - - ginkgo.By("gc policy route") - podClient.CreateSync(framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil)) - - ginkgo.By("restart kube-ovn-controller") - deployClient := f.DeploymentClientNS(framework.KubeOvnNamespace) - deploy := deployClient.Get("kube-ovn-controller") - framework.ExpectNotNil(deploy.Spec.Replicas) - deployClient.SetScale(deploy.Name, 0) - deployClient.RolloutStatus(deploy.Name) - - f.PodClientNS(namespaceName).DeleteSync(podName) - - deployClient.SetScale(deploy.Name, 1) - deployClient.RolloutStatus(deploy.Name) - - framework.WaitUntil(2*time.Second, 2*time.Minute, func(_ context.Context) (bool, error) { - out, _, err = framework.NBExec(nbCmd) - if err == nil && strings.TrimSpace(string(out)) == "" { - return true, nil - } - return false, err - }, "policy has been GCed") - - ginkgo.By("remove legacy lsp") - deleteLspCmd := fmt.Sprintf("ovn-nbctl --if-exists lsp-del %s.%s", pod.Name, pod.Namespace) - _, _, err = framework.NBExec(deleteLspCmd) - framework.ExpectNoError(err) - err = f.KubeOVNClientSet.KubeovnV1().IPs().Delete(context.Background(), fmt.Sprintf("%s.%s", pod.Name, pod.Namespace), metav1.DeleteOptions{}) - framework.ExpectNoError(err) - }) - - framework.ConformanceIt("should support configuring routes via pod annotation", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - - ginkgo.By("Generating routes") - routes := make([]request.Route, 0, 4) - for _, s := range strings.Split(cidr, ",") { - gw, err := util.LastIP(s) - framework.ExpectNoError(err) - var dst string - switch util.CheckProtocol(gw) { - case apiv1.ProtocolIPv4: - dst = "114.114.114.0/26" - case apiv1.ProtocolIPv6: - dst = "2400:3200::/126" - } - routes = append(routes, request.Route{Gateway: gw}, request.Route{Gateway: framework.PrevIP(gw), Destination: dst}) - } - - buff, err := json.Marshal(routes) - framework.ExpectNoError(err) - - ginkgo.By("Creating subnet " + subnetName) - subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil,
nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{ - util.RoutesAnnotation: string(buff), - } - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.CidrAnnotation, subnet.Spec.CIDRBlock) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.GatewayAnnotation, subnet.Spec.Gateway) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnet.Name) - framework.ExpectMAC(pod.Annotations[util.MacAddressAnnotation]) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - - ginkgo.By("Getting pod routes") - podRoutes, err := iproute.RouteShow("", "eth0", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(pod.Namespace, pod.Name, cmd...) - }) - framework.ExpectNoError(err) - - ginkgo.By("Validating pod routes") - actualRoutes := make([]request.Route, 0, len(podRoutes)) - for _, r := range podRoutes { - if r.Gateway != "" || r.Dst != "" { - actualRoutes = append(actualRoutes, request.Route{Destination: r.Dst, Gateway: r.Gateway}) - } - } - for _, r := range routes { - if r.Destination == "" { - r.Destination = "default" - } - framework.ExpectContainElement(actualRoutes, r) - } - }) -}) diff --git a/test/e2e/kube-ovn/pod/statefulset.go b/test/e2e/kube-ovn/pod/statefulset.go deleted file mode 100644 index 38a51b36f02..00000000000 --- a/test/e2e/kube-ovn/pod/statefulset.go +++ /dev/null @@ -1,67 +0,0 @@ -package pod - -import ( - "fmt" - - "github.com/onsi/ginkgo/v2" - "k8s.io/utils/ptr" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:pod]", func() { - f := framework.NewDefaultFramework("pod") - - var podClient *framework.PodClient - var stsClient *framework.StatefulSetClient - var stsName string - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - stsClient = f.StatefulSetClient() - stsName = "sts-" + framework.RandomSuffix() - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting sts " + stsName) - stsClient.DeleteSync(stsName) - }) - - framework.ConformanceIt("Should support statefulset scale Replica", func() { - // add this case for pr https://github.com/kubeovn/kube-ovn/pull/3777 - replicas := 3 - labels := map[string]string{"app": stsName} - - ginkgo.By("Creating statefulset " + stsName) - sts := framework.MakeStatefulSet(stsName, stsName, int32(replicas), labels, framework.PauseImage) - sts = stsClient.CreateSync(sts) - ginkgo.By("Delete pod for statefulset " + stsName) - pod2Name := stsName + "-2" - pod2 := podClient.GetPod(pod2Name) - pod2IP := pod2.Annotations[util.IPAddressAnnotation] - err := podClient.Delete(pod2Name) - framework.ExpectNoError(err, "failed to delete pod "+pod2Name) - stsClient.WaitForRunningAndReady(sts) - pod2 = podClient.GetPod(pod2Name) - framework.ExpectEqual(pod2.Annotations[util.IPAddressAnnotation], pod2IP) - - ginkgo.By("Scale sts replicas to 1") - sts = stsClient.Get(stsName) - patchSts := sts.DeepCopy() - patchSts.Spec.Replicas = ptr.To(int32(1)) - stsClient.PatchSync(sts, patchSts) - - for index := 1; index <= 2; index++ { - podName := fmt.Sprintf("%s-%d", stsName, index) - 
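// A StatefulSet removes pods in reverse ordinal order on scale-down, so with
// replicas now set to 1 the pods with ordinals 1 and 2 are expected to
// disappear while ordinal 0 keeps running with its stable network identity.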
ginkgo.By(fmt.Sprintf("Waiting pod %s to be deleted", podName)) - podClient.WaitForNotFound(podName) - } - ginkgo.By("Scale sts replicas to 3") - sts = stsClient.Get(stsName) - patchSts = sts.DeepCopy() - patchSts.Spec.Replicas = ptr.To(int32(3)) - stsClient.PatchSync(sts, patchSts) - ginkgo.By("Waiting for statefulset " + stsName + " to be ready") - stsClient.WaitForRunningAndReady(patchSts) - }) -}) diff --git a/test/e2e/kube-ovn/pod/vpc_pod_probe.go b/test/e2e/kube-ovn/pod/vpc_pod_probe.go deleted file mode 100644 index 233ced69f40..00000000000 --- a/test/e2e/kube-ovn/pod/vpc_pod_probe.go +++ /dev/null @@ -1,224 +0,0 @@ -package pod - -import ( - "fmt" - "math/rand/v2" - "net" - "strconv" - "strings" - - "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iptables" -) - -var _ = framework.SerialDescribe("[group:pod]", func() { - f := framework.NewDefaultFramework("vpc-pod-probe") - - var podClient *framework.PodClient - var eventClient *framework.EventClient - var subnetClient *framework.SubnetClient - var vpcClient *framework.VpcClient - var namespaceName, subnetName, podName, vpcName string - var subnet *apiv1.Subnet - var cidr string - var extraSubnetNames []string - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - eventClient = f.EventClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - subnetName = "subnet-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - vpcClient = f.VpcClient() - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - - ginkgo.By("Deleting custom vpc " + vpcName) - vpcClient.DeleteSync(vpcName) - - for _, subnetName := range extraSubnetNames { - subnetClient.DeleteSync(subnetName) - } - }) - - framework.ConformanceIt("should support http and tcp readiness probe in custom vpc pod", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - originDs := daemonSetClient.Get("kube-ovn-cni") - modifyDs := originDs.DeepCopy() - - newArgs := originDs.Spec.Template.Spec.Containers[0].Args - for index, arg := range newArgs { - if arg == "--enable-tproxy=false" { - newArgs = append(newArgs[:index], newArgs[index+1:]...) 
- } - } - newArgs = append(newArgs, "--enable-tproxy=true") - modifyDs.Spec.Template.Spec.Containers[0].Args = newArgs - - daemonSetClient.PatchSync(modifyDs) - - custVPCSubnetName := "subnet-" + framework.RandomSuffix() - extraSubnetNames = append(extraSubnetNames, custVPCSubnetName) - - ginkgo.By("Create Custom Vpc subnet Pod") - vpcName = "vpc-" + framework.RandomSuffix() - customVPC := framework.MakeVpc(vpcName, "", false, false, nil) - vpcClient.CreateSync(customVPC) - - ginkgo.By("Creating subnet " + custVPCSubnetName) - cidr = framework.RandomCIDR(f.ClusterIPFamily) - subnet := framework.MakeSubnet(custVPCSubnetName, "", cidr, "", vpcName, "", nil, nil, nil) - _ = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod with HTTP readiness probe that port is accessible " + podName) - port := 8000 + rand.Int32N(1000) - portStr := strconv.Itoa(int(port)) - args := []string{"netexec", "--http-port", portStr} - pod := framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args) - pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt32(port), - }, - }, - } - pod = podClient.CreateSync(pod) - checkTProxyRules(f, pod, port, true) - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Creating pod with HTTP readiness probe that port is not accessible " + podName) - pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args) - pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - HTTPGet: &corev1.HTTPGetAction{ - Port: intstr.FromInt32(port + 1), - }, - }, - } - _ = podClient.Create(pod) - - ginkgo.By("Waiting for pod readiness probe failure") - events := eventClient.WaitToHaveEvent("Pod", podName, "Warning", "Unhealthy", "kubelet", "") - var found bool - for _, event := range events { - if strings.Contains(event.Message, "Readiness probe failed") { - found = true - framework.Logf("Found pod event: %s", event.Message) - break - } - } - framework.ExpectTrue(found, "Pod readiness probe is expected to fail") - - pod = podClient.GetPod(podName) - checkTProxyRules(f, pod, port+1, true) - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Creating pod with TCP readiness probe that port is accessible " + podName) - pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args) - pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt32(port), - }, - }, - } - pod = podClient.CreateSync(pod) - checkTProxyRules(f, pod, port, true) - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Creating pod with TCP readiness probe that port is not accessible " + podName) - pod = framework.MakePod(namespaceName, podName, nil, map[string]string{util.LogicalSwitchAnnotation: custVPCSubnetName}, framework.AgnhostImage, nil, args) - pod.Spec.Containers[0].ReadinessProbe = &corev1.Probe{ - ProbeHandler: corev1.ProbeHandler{ - TCPSocket: &corev1.TCPSocketAction{ - Port: intstr.FromInt32(port - 1), - }, - }, - } - _ = podClient.Create(pod) - podClient.WaitForRunning(podName) - - ginkgo.By("Waiting for pod readiness 
probe failure") - events = eventClient.WaitToHaveEvent("Pod", podName, "Warning", "Unhealthy", "kubelet", "") - found = false - for _, event := range events { - if strings.Contains(event.Message, "Readiness probe failed") { - found = true - framework.Logf("Found pod event: %s", event.Message) - break - } - } - framework.ExpectTrue(found, "Pod readiness probe is expected to fail") - - pod = podClient.GetPod(podName) - checkTProxyRules(f, pod, port-1, false) - }) -}) - -func checkTProxyRules(f *framework.Framework, pod *corev1.Pod, probePort int32, exist bool) { - ginkgo.GinkgoHelper() - - nodeName := pod.Spec.NodeName - tProxyOutputMarkMask := fmt.Sprintf("%#x/%#x", util.TProxyOutputMark, util.TProxyOutputMask) - tProxyPreRoutingMarkMask := fmt.Sprintf("%#x/%#x", util.TProxyPreroutingMark, util.TProxyPreroutingMask) - - isZeroIP := false - if len(pod.Status.PodIPs) == 2 { - isZeroIP = true - } - - for _, podIP := range pod.Status.PodIPs { - if util.CheckProtocol(podIP.IP) == apiv1.ProtocolIPv4 { - expectedRules := []string{ - fmt.Sprintf(`-A OVN-OUTPUT -d %s/32 -p tcp -m tcp --dport %d -j MARK --set-xmark %s`, podIP.IP, probePort, tProxyOutputMarkMask), - } - iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnOutput, apiv1.ProtocolIPv4, expectedRules, exist) - hostIP := pod.Status.HostIP - if isZeroIP { - hostIP = net.IPv4zero.String() - } - expectedRules = []string{ - fmt.Sprintf(`-A OVN-PREROUTING -d %s/32 -p tcp -m tcp --dport %d -j TPROXY --on-port %d --on-ip %s --tproxy-mark %s`, podIP.IP, probePort, util.TProxyListenPort, hostIP, tProxyPreRoutingMarkMask), - } - iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnPrerouting, apiv1.ProtocolIPv4, expectedRules, exist) - } else if util.CheckProtocol(podIP.IP) == apiv1.ProtocolIPv6 { - expectedRules := []string{ - fmt.Sprintf(`-A OVN-OUTPUT -d %s/128 -p tcp -m tcp --dport %d -j MARK --set-xmark %s`, podIP.IP, probePort, tProxyOutputMarkMask), - } - iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnOutput, apiv1.ProtocolIPv6, expectedRules, exist) - - hostIP := pod.Status.HostIP - if isZeroIP { - hostIP = "::" - } - expectedRules = []string{ - fmt.Sprintf(`-A OVN-PREROUTING -d %s/128 -p tcp -m tcp --dport %d -j TPROXY --on-port %d --on-ip %s --tproxy-mark %s`, podIP.IP, probePort, util.TProxyListenPort, hostIP, tProxyPreRoutingMarkMask), - } - iptables.CheckIptablesRulesOnNode(f, nodeName, util.Mangle, util.OvnPrerouting, apiv1.ProtocolIPv6, expectedRules, exist) - } - } -} diff --git a/test/e2e/kube-ovn/qos/qos.go b/test/e2e/kube-ovn/qos/qos.go deleted file mode 100644 index 325de16b571..00000000000 --- a/test/e2e/kube-ovn/qos/qos.go +++ /dev/null @@ -1,199 +0,0 @@ -package qos - -import ( - "context" - "fmt" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func parseConfig(table, config string) map[string]string { - kvs := make(map[string]string, 3) - for _, s := range strings.Fields(config) { - kv := strings.Split(s, "=") - if len(kv) != 2 { - framework.Logf("ignore %s config %s", table, s) - continue - } - kvs[kv[0]] = kv[1] - } - - return kvs -} - -func getOvsPodOnNode(f *framework.Framework, node string) *corev1.Pod { - ginkgo.GinkgoHelper() - - daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - ds := daemonSetClient.Get("ovs-ovn") - pod, err := 
daemonSetClient.GetPodOnNode(ds, node) - framework.ExpectNoError(err) - return pod -} - -func getOvsQosForPod(f *framework.Framework, table string, pod *corev1.Pod) map[string]string { - ginkgo.GinkgoHelper() - - ovsPod := getOvsPodOnNode(f, pod.Spec.NodeName) - cmd := fmt.Sprintf(`ovs-vsctl --no-heading --columns=other_config --bare find %s external_ids:pod="%s/%s"`, table, pod.Namespace, pod.Name) - output := e2epodoutput.RunHostCmdOrDie(ovsPod.Namespace, ovsPod.Name, cmd) - return parseConfig(table, output) -} - -func waitOvsQosForPod(f *framework.Framework, table string, pod *corev1.Pod, expected map[string]string) map[string]string { - ginkgo.GinkgoHelper() - - ovsPod := getOvsPodOnNode(f, pod.Spec.NodeName) - cmd := fmt.Sprintf(`ovs-vsctl --no-heading --columns=other_config --bare find %s external_ids:pod="%s/%s"`, table, pod.Namespace, pod.Name) - - var config map[string]string - framework.WaitUntil(2*time.Second, 2*time.Minute, func(_ context.Context) (bool, error) { - output, err := e2epodoutput.RunHostCmd(ovsPod.Namespace, ovsPod.Name, cmd) - if err != nil { - return false, err - } - if output == "" { - return false, nil - } - kvs := parseConfig(table, output) - for k, v := range expected { - if kvs[k] != v { - return false, nil - } - } - - config = kvs - return true, nil - }, "") - - return config -} - -var _ = framework.Describe("[group:qos]", func() { - f := framework.NewDefaultFramework("qos") - - var podName, namespaceName string - var podClient *framework.PodClient - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - }) - - framework.ConformanceIt("should support netem QoS", func() { - f.SkipVersionPriorTo(1, 9, "Support for netem QoS was introduced in v1.9") - - ginkgo.By("Creating pod " + podName) - latency, jitter, limit, loss := 600, 400, 2000, 10 - annotations := map[string]string{ - util.NetemQosLatencyAnnotation: strconv.Itoa(latency), - util.NetemQosLimitAnnotation: strconv.Itoa(limit), - util.NetemQosLossAnnotation: strconv.Itoa(loss), - } - if !f.VersionPriorTo(1, 12) { - annotations[util.NetemQosJitterAnnotation] = strconv.Itoa(jitter) - } - pod := framework.MakePod(namespaceName, podName, nil, annotations, "", nil, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLatencyAnnotation, strconv.Itoa(latency)) - if !f.VersionPriorTo(1, 12) { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosJitterAnnotation, strconv.Itoa(jitter)) - } - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLimitAnnotation, strconv.Itoa(limit)) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLossAnnotation, strconv.Itoa(loss)) - - ginkgo.By("Validating OVS QoS") - qos := getOvsQosForPod(f, "qos", pod) - framework.ExpectHaveKeyWithValue(qos, "latency", strconv.Itoa(latency*1000)) - if !f.VersionPriorTo(1, 12) { - framework.ExpectHaveKeyWithValue(qos, "jitter", strconv.Itoa(jitter*1000)) - } - framework.ExpectHaveKeyWithValue(qos, "limit", strconv.Itoa(limit)) - framework.ExpectHaveKeyWithValue(qos, "loss", strconv.Itoa(loss)) - }) - - framework.ConformanceIt("should be able to update netem QoS", 
func() { - f.SkipVersionPriorTo(1, 9, "Support for netem QoS was introduced in v1.9") - - ginkgo.By("Creating pod " + podName + " without QoS") - pod := framework.MakePod(namespaceName, podName, nil, nil, "", nil, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLatencyAnnotation) - framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosJitterAnnotation) - framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLimitAnnotation) - framework.ExpectNotHaveKey(pod.Annotations, util.NetemQosLossAnnotation) - - ginkgo.By("Adding netem QoS to pod annotations") - latency, jitter, limit, loss := 600, 400, 2000, 10 - modifiedPod := pod.DeepCopy() - modifiedPod.Annotations[util.NetemQosLatencyAnnotation] = strconv.Itoa(latency) - if !f.VersionPriorTo(1, 12) { - modifiedPod.Annotations[util.NetemQosJitterAnnotation] = strconv.Itoa(jitter) - } - modifiedPod.Annotations[util.NetemQosLimitAnnotation] = strconv.Itoa(limit) - modifiedPod.Annotations[util.NetemQosLossAnnotation] = strconv.Itoa(loss) - pod = podClient.Patch(pod, modifiedPod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLatencyAnnotation, strconv.Itoa(latency)) - if !f.VersionPriorTo(1, 12) { - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosJitterAnnotation, strconv.Itoa(jitter)) - } - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLimitAnnotation, strconv.Itoa(limit)) - framework.ExpectHaveKeyWithValue(pod.Annotations, util.NetemQosLossAnnotation, strconv.Itoa(loss)) - - ginkgo.By("Validating OVS QoS") - qos := waitOvsQosForPod(f, "qos", pod, nil) - framework.ExpectHaveKeyWithValue(qos, "latency", strconv.Itoa(latency*1000)) - if !f.VersionPriorTo(1, 12) { - framework.ExpectHaveKeyWithValue(qos, "jitter", strconv.Itoa(jitter*1000)) - } - framework.ExpectHaveKeyWithValue(qos, "limit", strconv.Itoa(limit)) - framework.ExpectHaveKeyWithValue(qos, "loss", strconv.Itoa(loss)) - }) - - framework.ConformanceIt("should support htb QoS", func() { - f.SkipVersionPriorTo(1, 9, "Support for htb QoS with priority was introduced in v1.9") - - ginkgo.By("Creating pod " + podName) - ingressRate := 300 - annotations := map[string]string{ - util.IngressRateAnnotation: strconv.Itoa(ingressRate), - } - pod := framework.MakePod(namespaceName, podName, nil, annotations, "", nil, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.IngressRateAnnotation, strconv.Itoa(ingressRate)) - - ginkgo.By("Validating OVS Queue") - queue := getOvsQosForPod(f, "queue", pod) - framework.ExpectHaveKeyWithValue(queue, "max-rate", strconv.Itoa(ingressRate*1000*1000)) - }) -}) diff --git a/test/e2e/kube-ovn/service/service.go b/test/e2e/kube-ovn/service/service.go deleted file mode 100644 index ae4cf47a506..00000000000 --- a/test/e2e/kube-ovn/service/service.go +++ /dev/null @@ -1,205 +0,0 @@ -package service - 
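The service tests below repeatedly build curl probe commands from node or cluster IPs that may be IPv4 or IPv6; an IPv6 literal must be bracketed before a port can be appended, which is what the framework's JoinHostPort helper takes care of. A minimal sketch of that composition using only the standard library (the probeCmd name is hypothetical; the curl flags mirror the commands in the tests):

package main

import (
	"fmt"
	"net"
	"strconv"
)

// probeCmd renders the curl command used to hit ip:port; net.JoinHostPort
// brackets IPv6 literals automatically ([fd00::1]:30080).
func probeCmd(ip string, port int32) string {
	hostPort := net.JoinHostPort(ip, strconv.Itoa(int(port)))
	return fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", hostPort)
}

func main() {
	fmt.Println(probeCmd("192.168.0.2", 30080)) // 192.168.0.2:30080/clientip
	fmt.Println(probeCmd("fd00::1", 30080))     // [fd00::1]:30080/clientip
}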
-import ( - "bytes" - "context" - "fmt" - "math/rand/v2" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - clientset "k8s.io/client-go/kubernetes" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - "k8s.io/utils/ptr" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:service]", func() { - f := framework.NewDefaultFramework("service") - - var cs clientset.Interface - var serviceClient *framework.ServiceClient - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var namespaceName, serviceName, podName, hostPodName, subnetName, cidr string - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - serviceClient = f.ServiceClient() - podClient = f.PodClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - serviceName = "service-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - hostPodName = "pod-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting service " + serviceName) - serviceClient.DeleteSync(serviceName) - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting pod " + hostPodName) - podClient.DeleteSync(hostPodName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("should be able to connect to NodePort service with external traffic policy set to Local from other nodes", func() { - f.SkipVersionPriorTo(1, 9, "This case is not adapted before v1.9") - ginkgo.By("Creating subnet " + subnetName) - subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - _ = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - podLabels := map[string]string{"app": podName} - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - port := 8000 + rand.Int32N(1000) - portStr := strconv.Itoa(int(port)) - args := []string{"netexec", "--http-port", portStr} - pod := framework.MakePod(namespaceName, podName, podLabels, annotations, framework.AgnhostImage, nil, args) - _ = podClient.CreateSync(pod) - - ginkgo.By("Creating service " + serviceName) - ports := []corev1.ServicePort{{ - Name: "tcp", - Protocol: corev1.ProtocolTCP, - Port: port, - TargetPort: intstr.FromInt32(port), - }} - service := framework.MakeService(serviceName, corev1.ServiceTypeNodePort, nil, podLabels, ports, "") - service.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyLocal - service = serviceClient.CreateSync(service, func(s *corev1.Service) (bool, error) { - return len(s.Spec.Ports) != 0 && s.Spec.Ports[0].NodePort != 0, nil - }, "node port is allocated") - - ginkgo.By("Creating pod " + hostPodName + " with host network") - cmd := []string{"sh", "-c", "sleep infinity"} - hostPod := framework.MakePod(namespaceName, hostPodName, nil, nil, f.KubeOVNImage, cmd, nil) - hostPod.Spec.HostNetwork = true - _ = podClient.CreateSync(hostPod) - - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - nodePort := service.Spec.Ports[0].NodePort - fnCheck := func(nodeName, nodeIP string, nodePort int32) { - ginkgo.GinkgoHelper() - - if 
nodeIP == "" { - return - } - protocol := strings.ToLower(util.CheckProtocol(nodeIP)) - ginkgo.By("Checking " + protocol + " connection via node " + nodeName) - cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", util.JoinHostPort(nodeIP, nodePort)) - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, namespaceName, hostPodName)) - _, err := e2epodoutput.RunHostCmd(namespaceName, hostPodName, cmd) - return err == nil, nil - }, "") - } - for _, node := range nodeList.Items { - ipv4, ipv6 := util.GetNodeInternalIP(node) - fnCheck(node.Name, ipv4, nodePort) - fnCheck(node.Name, ipv6, nodePort) - } - }) - - framework.ConformanceIt("should ovn nb change vip when dual-stack service removes the cluster ip", func() { - if !f.IsDual() { - ginkgo.Skip("this case only support dual mode") - } - f.SkipVersionPriorTo(1, 11, "This case is support in v1.11") - ginkgo.By("Creating service " + serviceName) - port := 8000 + rand.Int32N(1000) - ports := []corev1.ServicePort{{ - Name: "tcp", - Protocol: corev1.ProtocolTCP, - Port: port, - TargetPort: intstr.FromInt32(port), - }} - - selector := map[string]string{"app": "svc-dual"} - service := framework.MakeService(serviceName, corev1.ServiceTypeClusterIP, nil, selector, ports, corev1.ServiceAffinityNone) - service.Namespace = namespaceName - service.Spec.IPFamilyPolicy = ptr.To(corev1.IPFamilyPolicyRequireDualStack) - service = serviceClient.CreateSync(service, func(s *corev1.Service) (bool, error) { - return len(util.ServiceClusterIPs(*s)) == 2, nil - }, "both ipv4 and ipv6 cluster ips are allocated") - v6ClusterIP := service.Spec.ClusterIPs[1] - originService := service.DeepCopy() - framework.Logf("created service %s with cluster ips %s", serviceName, strings.Join(util.ServiceClusterIPs(*service), ",")) - - ginkgo.By("Creating pod " + podName) - podBackend := framework.MakePod(namespaceName, podName, selector, nil, framework.PauseImage, nil, nil) - podBackend = podClient.CreateSync(podBackend) - framework.Logf("created pod %s with ips %s", podName, strings.Join(util.PodIPs(*podBackend), ",")) - - checkContainsClusterIP := func(v6ClusterIP string, isContain bool) { - ginkgo.GinkgoHelper() - - cmd := "ovn-nbctl --format=csv --data=bare --no-heading --columns=vips list Load_Balancer cluster-tcp-loadbalancer" - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - output, _, err := framework.NBExec(cmd) - framework.ExpectNoError(err) - output = bytes.TrimSpace(output) - if output[0] == '"' { - output = output[1 : len(output)-1] - } - framework.Logf("cluster-tcp-loadbalancer vips is %q", output) - framework.Logf("IPv6 cluster ip is %q", v6ClusterIP) - vips := strings.Fields(string(output)) - prefix := util.JoinHostPort(v6ClusterIP, port) + "=" - var found bool - for _, vip := range vips { - if strings.HasPrefix(vip, prefix) { - found = true - break - } - } - if found == isContain { - return true, nil - } - return false, nil - }, "") - - output, _, err := framework.NBExec(cmd) - framework.ExpectNoError(err) - framework.ExpectEqual(strings.Contains(string(output), v6ClusterIP), isContain) - } - - ginkgo.By("check service from dual stack should have cluster ip") - checkContainsClusterIP(v6ClusterIP, true) - - ginkgo.By("change service from dual stack to single stack") - modifyService := service.DeepCopy() - modifyService.Spec.IPFamilyPolicy = ptr.To(corev1.IPFamilyPolicySingleStack) - 
modifyService.Spec.IPFamilies = []corev1.IPFamily{corev1.IPv4Protocol} - modifyService.Spec.ClusterIPs = []string{service.Spec.ClusterIP} - service = serviceClient.Patch(service, modifyService) - checkContainsClusterIP(v6ClusterIP, false) - - ginkgo.By("recover service from single stack to dual stack") - recoverService := service.DeepCopy() - recoverService.Spec.IPFamilyPolicy = ptr.To(*originService.Spec.IPFamilyPolicy) - recoverService.Spec.IPFamilies = originService.Spec.IPFamilies - recoverService.Spec.ClusterIPs = originService.Spec.ClusterIPs - _ = serviceClient.Patch(service, recoverService) - checkContainsClusterIP(v6ClusterIP, true) - }) -}) diff --git a/test/e2e/kube-ovn/subnet/subnet.go b/test/e2e/kube-ovn/subnet/subnet.go deleted file mode 100644 index d87e59dfdee..00000000000 --- a/test/e2e/kube-ovn/subnet/subnet.go +++ /dev/null @@ -1,1594 +0,0 @@ -package subnet - -import ( - "context" - "fmt" - "math/big" - "math/rand/v2" - "net" - "os/exec" - "slices" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - clientset "k8s.io/client-go/kubernetes" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - "github.com/onsi/gomega" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iptables" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -func getOvsPodOnNode(f *framework.Framework, node string) *corev1.Pod { - ginkgo.GinkgoHelper() - - daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - ds := daemonSetClient.Get("ovs-ovn") - pod, err := daemonSetClient.GetPodOnNode(ds, node) - framework.ExpectNoError(err) - return pod -} - -func checkNatOutgoingRoutes(f *framework.Framework, ns, pod string, gateways []string) { - ginkgo.GinkgoHelper() - - afs := make([]int, 0, 2) - dst := make([]string, 0, 2) - if f.HasIPv4() { - afs = append(afs, 4) - dst = append(dst, "1.1.1.1") - } - if f.HasIPv6() { - afs = append(afs, 6) - dst = append(dst, "2606:4700:4700::1111") - } - - for i, af := range afs { - ginkgo.By(fmt.Sprintf("Checking IPv%d NAT outgoing routes of %s/%s", af, ns, pod)) - cmd := fmt.Sprintf("traceroute -%d -n -f2 -m2 %s", af, dst[i]) - framework.WaitUntil(3*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - // traceroute to 1.1.1.1 (1.1.1.1), 2 hops max, 60 byte packets - // 2 172.19.0.2 0.663 ms 0.613 ms 0.605 ms - output, err := e2epodoutput.RunHostCmd(ns, pod, cmd) - if err != nil { - return false, nil - } - - lines := strings.Split(strings.TrimSpace(output), "\n") - fields := strings.Fields(lines[len(lines)-1]) - return len(fields) > 2 && slices.Contains(gateways, fields[1]), nil - }, "") - } -} - -func checkSubnetNatOutgoingPolicyRuleStatus(subnetClient *framework.SubnetClient, subnetName string, rules []apiv1.NatOutgoingPolicyRule) *apiv1.Subnet { - ginkgo.GinkgoHelper() - - ginkgo.By("Waiting for status of subnet " + subnetName + " to be updated") - var subnet *apiv1.Subnet - framework.WaitUntil(2*time.Second, 10*time.Second, func(_ context.Context) (bool, error) { - s := subnetClient.Get(subnetName) - if len(s.Status.NatOutgoingPolicyRules) != len(rules) { - return false, nil - } - for i, r := range s.Status.NatOutgoingPolicyRules { - if r.RuleID == "" 
|| r.NatOutgoingPolicyRule != rules[i] { - return false, nil - } - } - subnet = s - return true, nil - }, "") - return subnet -} - -func checkIPSetOnNode(f *framework.Framework, node string, expectedIPsets []string, shouldExist bool) { - ginkgo.GinkgoHelper() - - ovsPod := getOvsPodOnNode(f, node) - cmd := `ipset list | grep '^Name:' | awk '{print $2}'` - framework.WaitUntil(3*time.Second, 10*time.Second, func(_ context.Context) (bool, error) { - output := e2epodoutput.RunHostCmdOrDie(ovsPod.Namespace, ovsPod.Name, cmd) - existingIPsets := strings.Split(output, "\n") - for _, r := range expectedIPsets { - framework.Logf("checking ipset %s: %v", r, shouldExist) - ok, err := gomega.ContainElement(r).Match(existingIPsets) - if err != nil || ok != shouldExist { - return false, err - } - } - return true, nil - }, "") -} - -var _ = framework.Describe("[group:subnet]", func() { - f := framework.NewDefaultFramework("subnet") - - var subnet *apiv1.Subnet - var cs clientset.Interface - var podClient *framework.PodClient - var deployClient *framework.DeploymentClient - var subnetClient *framework.SubnetClient - var eventClient *framework.EventClient - var namespaceName, subnetName, fakeSubnetName, podNamePrefix, deployName, podName string - var cidr, cidrV4, cidrV6, firstIPv4, lastIPv4, firstIPv6, lastIPv6 string - var gateways []string - var podCount int - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - deployClient = f.DeploymentClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - subnetName = "subnet-" + framework.RandomSuffix() - fakeSubnetName = "subnet-" + framework.RandomSuffix() - deployName = "deploy-" + framework.RandomSuffix() - podNamePrefix = "pod-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - cidrV4, cidrV6 = util.SplitStringIP(cidr) - gateways = nil - podCount = 0 - if cidrV4 == "" { - firstIPv4 = "" - lastIPv4 = "" - } else { - firstIPv4, _ = util.FirstIP(cidrV4) - lastIPv4, _ = util.LastIP(cidrV4) - gateways = append(gateways, firstIPv4) - } - if cidrV6 == "" { - firstIPv6 = "" - lastIPv6 = "" - } else { - firstIPv6, _ = util.FirstIP(cidrV6) - lastIPv6, _ = util.LastIP(cidrV6) - gateways = append(gateways, firstIPv6) - } - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting deployment " + deployName) - deployClient.DeleteSync(deployName) - - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - } - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting subnet " + fakeSubnetName) - subnetClient.DeleteSync(fakeSubnetName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("should create subnet with only cidr provided", func() { - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectEmpty(subnet.Spec.Namespaces) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - 
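// No gateway type was specified at creation, so the subnet is expected to fall back to the default distributed gateway. -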
framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) - framework.ExpectEmpty(subnet.Spec.GatewayNode) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - framework.ExpectEmpty(subnet.Status.ActivateGateway) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) - } - - // TODO: check routes on ovn0 - }) - - framework.ConformanceIt("should format subnet cidr", func() { - fn := func(cidr string) string { - if cidr == "" { - return "" - } - _, ipnet, _ := net.ParseCIDR(cidr) - ipnet.IP = net.ParseIP(framework.RandomIPs(cidr, ";", 1)) - return ipnet.String() - } - - s := make([]string, 0, 2) - if c := fn(cidrV4); c != "" { - s = append(s, c) - } - if c := fn(cidrV6); c != "" { - s = append(s, c) - } - - subnet = framework.MakeSubnet(subnetName, "", strings.Join(s, ","), "", "", "", nil, nil, nil) - ginkgo.By("Creating subnet " + subnetName + " with cidr " + subnet.Spec.CIDRBlock) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectEmpty(subnet.Spec.Namespaces) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) - framework.ExpectEmpty(subnet.Spec.GatewayNode) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - framework.ExpectEmpty(subnet.Status.ActivateGateway) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) - } - - // TODO: check routes on ovn0 - }) - - framework.ConformanceIt("should create subnet with exclude ips", func() { - excludeIPv4 := framework.RandomExcludeIPs(cidrV4, rand.IntN(10)+1) - excludeIPv6 := framework.RandomExcludeIPs(cidrV6, rand.IntN(10)+1) - excludeIPs := append(excludeIPv4, excludeIPv6...) 
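- // The excluded addresses are reserved from allocation: the available-IP checks below expect the CIDR size minus the excluded IPs and the gateway.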
- - ginkgo.By(fmt.Sprintf("Creating subnet %s with exclude ips %v", subnetName, excludeIPs)) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", excludeIPs, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectEmpty(subnet.Spec.Namespaces) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, append(excludeIPs, gateways...)) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) - framework.ExpectEmpty(subnet.Spec.GatewayNode) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - framework.ExpectEmpty(subnet.Status.ActivateGateway) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - expected := util.AddressCount(ipnet) - util.CountIPNums(excludeIPv4) - 1 - framework.ExpectEqual(subnet.Status.V4AvailableIPs, expected) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - expected := util.AddressCount(ipnet) - util.CountIPNums(excludeIPv6) - 1 - framework.ExpectEqual(subnet.Status.V6AvailableIPs, expected) - } - }) - - framework.ConformanceIt("should create subnet with centralized gateway", func() { - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - ginkgo.By("Creating subnet " + subnetName) - n := min(3, max(1, len(nodes.Items)-1)) - gatewayNodes := make([]string, 0, n) - nodeIPs := make([]string, 0, n*2) - var ipv4, ipv6 string - for i := 0; i < n; i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - if f.VersionPriorTo(1, 12) { - ipv4, ipv6 = util.SplitStringIP(nodes.Items[i].Annotations[util.IPAddressAnnotation]) - } else { - ipv4, ipv6 = util.GetNodeInternalIP(nodes.Items[i]) - } - nodeIPs = append(nodeIPs, strings.Split(strings.Trim(ipv4+","+ipv6, ","), ",")...) 
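- // The gateway nodes' internal IPs collected here are the expected first hops when checkNatOutgoingRoutes traceroutes from the pod later.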
- } - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, gatewayNodes, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectConsistOf(subnet.Spec.Namespaces, []string{namespaceName}) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWCentralizedType) - framework.ExpectConsistOf(strings.Split(subnet.Spec.GatewayNode, ","), gatewayNodes) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - framework.ExpectContainElement(gatewayNodes, subnet.Status.ActivateGateway) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) - } - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - _ = podClient.CreateSync(pod) - - checkNatOutgoingRoutes(f, namespaceName, podName, nodeIPs) - }) - - framework.ConformanceIt("should be able to switch gateway mode to centralized", func() { - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectConsistOf(subnet.Spec.Namespaces, []string{namespaceName}) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWDistributedType) - framework.ExpectEmpty(subnet.Spec.GatewayNode) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - framework.ExpectEmpty(subnet.Status.ActivateGateway) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - 
framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) - } - - ginkgo.By("Converting gateway mode to centralized") - n := min(3, max(1, len(nodes.Items)-1)) - gatewayNodes := make([]string, 0, n) - nodeIPs := make([]string, 0, n*2) - var ipv4, ipv6 string - for i := 0; i < n; i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - if f.VersionPriorTo(1, 12) { - ipv4, ipv6 = util.SplitStringIP(nodes.Items[i].Annotations[util.IPAddressAnnotation]) - } else { - ipv4, ipv6 = util.GetNodeInternalIP(nodes.Items[i]) - } - nodeIPs = append(nodeIPs, strings.Split(strings.Trim(ipv4+","+ipv6, ","), ",")...) - } - modifiedSubnet := subnet.DeepCopy() - modifiedSubnet.Spec.GatewayNode = strings.Join(gatewayNodes, ",") - modifiedSubnet.Spec.GatewayType = apiv1.GWCentralizedType - subnet = subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet spec fields") - framework.ExpectFalse(subnet.Spec.Default) - framework.ExpectEqual(subnet.Spec.Protocol, util.CheckProtocol(cidr)) - framework.ExpectConsistOf(subnet.Spec.Namespaces, []string{namespaceName}) - framework.ExpectConsistOf(subnet.Spec.ExcludeIps, gateways) - framework.ExpectEqual(subnet.Spec.Gateway, strings.Join(gateways, ",")) - framework.ExpectEqual(subnet.Spec.GatewayType, apiv1.GWCentralizedType) - framework.ExpectConsistOf(strings.Split(subnet.Spec.GatewayNode, ","), gatewayNodes) - framework.ExpectFalse(subnet.Spec.NatOutgoing) - framework.ExpectFalse(subnet.Spec.Private) - framework.ExpectEmpty(subnet.Spec.AllowSubnets) - - ginkgo.By("Validating subnet status fields") - subnet = subnetClient.WaitUntil(subnetName, func(s *apiv1.Subnet) (bool, error) { - return gomega.ContainElement(s.Status.ActivateGateway).Match(gatewayNodes) - }, fmt.Sprintf("field .status.activateGateway is within %v", gatewayNodes), - 2*time.Second, time.Minute, - ) - framework.ExpectZero(subnet.Status.V4UsingIPs) - framework.ExpectZero(subnet.Status.V6UsingIPs) - - if cidrV4 == "" { - framework.ExpectZero(subnet.Status.V4AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV4) - framework.ExpectEqual(subnet.Status.V4AvailableIPs, util.AddressCount(ipnet)-1) - } - if cidrV6 == "" { - framework.ExpectZero(subnet.Status.V6AvailableIPs) - } else { - _, ipnet, _ := net.ParseCIDR(cidrV6) - framework.ExpectEqual(subnet.Status.V6AvailableIPs, util.AddressCount(ipnet)-1) - } - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - _ = podClient.CreateSync(pod) - - checkNatOutgoingRoutes(f, namespaceName, podName, nodeIPs) - }) - - framework.ConformanceIt("create centralized subnet without enableEcmp", func() { - f.SkipVersionPriorTo(1, 12, "Support for enableEcmp in subnet is introduced in v1.12") - - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - ginkgo.By("Creating subnet " + subnetName) - n := min(3, max(1, len(nodes.Items)-1)) - if len(nodes.Items) == 2 { - n = 2 - } - gatewayNodes := make([]string, 0, n) - nodeIPs := make([]string, 0, n) - for i := 0; i < n; i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - nodeIPs = append(nodeIPs, nodes.Items[i].Annotations[util.IPAddressAnnotation]) - } - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, gatewayNodes, 
[]string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating centralized subnet with active-standby mode") - framework.ExpectFalse(subnet.Spec.EnableEcmp) - framework.ExpectEqual(subnet.Status.ActivateGateway, gatewayNodes[0]) - framework.ExpectConsistOf(strings.Split(subnet.Spec.GatewayNode, ","), gatewayNodes) - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - _ = podClient.CreateSync(pod) - - var gwIPv4, gwIPv6 string - if f.VersionPriorTo(1, 12) { - gwIPv4, gwIPv6 = util.SplitStringIP(nodes.Items[0].Annotations[util.IPAddressAnnotation]) - } else { - gwIPv4, gwIPv6 = util.GetNodeInternalIP(nodes.Items[0]) - } - checkNatOutgoingRoutes(f, namespaceName, podName, strings.Split(strings.Trim(gwIPv4+","+gwIPv6, ","), ",")) - - ginkgo.By("Change subnet spec field enableEcmp to true") - modifiedSubnet := subnet.DeepCopy() - modifiedSubnet.Spec.EnableEcmp = true - subnet = subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Validating active gateway") - nbctlCmd := fmt.Sprintf("ovn-nbctl --format=csv --data=bare --no-heading --columns=nexthops find logical-router-policy external_ids:subnet=%s", subnetName) - output, _, err := framework.NBExec(nbctlCmd) - framework.ExpectNoError(err) - - lines := strings.Split(string(output), "\n") - nextHops := make([]string, 0, len(lines)) - for _, l := range lines { - if len(strings.TrimSpace(l)) == 0 { - continue - } - nextHops = strings.Fields(l) - } - framework.Logf("subnet policy route nextHops %v, gatewayNode IPs %v", nextHops, nodeIPs) - - check := true - if len(nextHops) < len(nodeIPs) { - framework.Logf("some gateway nodes maybe not ready for subnet %s", subnetName) - check = false - } - - if check { - for _, nodeIP := range nodeIPs { - for _, strIP := range strings.Split(nodeIP, ",") { - if util.CheckProtocol(strIP) != util.CheckProtocol(nextHops[0]) { - continue - } - framework.ExpectContainElement(nextHops, strIP) - } - } - } - }) - - framework.ConformanceIt("should support distributed external egress gateway", func() { - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - clusterName, ok := kind.IsKindProvided(nodes.Items[0].Spec.ProviderID) - if !ok { - ginkgo.Skip("external egress gateway spec only runs in clusters created by kind") - } - - ginkgo.By("Getting docker network used by kind") - network, err := docker.NetworkInspect(kind.NetworkName) - framework.ExpectNoError(err) - - ginkgo.By("Determine external egress gateway addresses") - var gatewayV4, gatewayV6 string - for _, config := range network.IPAM.Config { - if config.Subnet != "" { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if cidrV4 != "" { - gatewayV4, err = util.LastIP(config.Subnet) - framework.ExpectNoError(err) - } - case apiv1.ProtocolIPv6: - if cidrV6 != "" { - gatewayV6, err = util.LastIP(config.Subnet) - framework.ExpectNoError(err) - } - } - } - } - gateways := make([]string, 0, 2) - if gatewayV4 != "" { - gateways = append(gateways, gatewayV4) - } - if gatewayV6 != "" { - gateways = append(gateways, gatewayV6) - } - - ginkgo.By("Creating subnet " + subnetName) - prPriority := 1000 + rand.IntN(1000) - prTable := 1000 + rand.IntN(1000) - subnet = framework.MakeSubnet(subnetName, 
"", cidr, "", "", "", nil, nil, []string{namespaceName}) - subnet.Spec.ExternalEgressGateway = strings.Join(gateways, ",") - subnet.Spec.PolicyRoutingPriority = uint32(prPriority) - subnet.Spec.PolicyRoutingTableID = uint32(prTable) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Getting kind nodes") - kindNodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err) - framework.ExpectNotEmpty(kindNodes) - - for _, node := range kindNodes { - ginkgo.By("Getting ip rules in node " + node.Name()) - rules, err := iproute.RuleShow("", node.Exec) - framework.ExpectNoError(err) - - ginkgo.By("Checking ip rules in node " + node.Name()) - podIPs := util.PodIPs(*pod) - for _, rule := range rules { - if rule.Priority == prPriority && - rule.Table == strconv.Itoa(prTable) { - framework.ExpectEqual(pod.Spec.NodeName, node.Name()) - framework.ExpectContainElement(podIPs, rule.Src) - framework.ExpectEqual(rule.SrcLen, 0) - } - } - - if pod.Spec.NodeName != node.Name() { - continue - } - - ginkgo.By("Getting ip routes in node " + node.Name()) - routes, err := iproute.RouteShow(strconv.Itoa(prTable), "", node.Exec) - framework.ExpectNoError(err) - - ginkgo.By("Checking ip routes in node " + node.Name()) - framework.ExpectHaveLen(routes, len(gateways)) - nexthops := make([]string, 0, 2) - for _, route := range routes { - framework.ExpectEqual(route.Dst, "default") - nexthops = append(nexthops, route.Gateway) - } - framework.ExpectConsistOf(nexthops, gateways) - } - }) - - framework.ConformanceIt("should support centralized external egress gateway", func() { - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - clusterName, ok := kind.IsKindProvided(nodes.Items[0].Spec.ProviderID) - if !ok { - ginkgo.Skip("external egress gateway spec only runs in clusters created by kind") - } - - ginkgo.By("Getting docker network used by kind") - network, err := docker.NetworkInspect(kind.NetworkName) - framework.ExpectNoError(err) - - ginkgo.By("Determine external egress gateway addresses") - var gatewayV4, gatewayV6 string - for _, config := range network.IPAM.Config { - if config.Subnet != "" { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if cidrV4 != "" { - gatewayV4, err = util.LastIP(config.Subnet) - framework.ExpectNoError(err) - } - case apiv1.ProtocolIPv6: - if cidrV6 != "" { - gatewayV6, err = util.LastIP(config.Subnet) - framework.ExpectNoError(err) - } - } - } - } - cidrs := make([]string, 0, 2) - gateways := make([]string, 0, 2) - if gatewayV4 != "" { - cidrs = append(cidrs, cidrV4) - gateways = append(gateways, gatewayV4) - } - if gatewayV6 != "" { - cidrs = append(cidrs, cidrV6) - gateways = append(gateways, gatewayV6) - } - - ginkgo.By("Creating subnet " + subnetName) - n := min(3, max(1, len(nodes.Items)-1)) - gatewayNodes := make([]string, 0, n) - for i := 0; i < n; i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - } - prPriority := 1000 + rand.IntN(1000) - prTable := 1000 + rand.IntN(1000) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, gatewayNodes, []string{namespaceName}) - subnet.Spec.ExternalEgressGateway = strings.Join(gateways, ",") - subnet.Spec.PolicyRoutingPriority = 
uint32(prPriority) - subnet.Spec.PolicyRoutingTableID = uint32(prTable) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Getting kind nodes") - kindNodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err) - framework.ExpectNotEmpty(kindNodes) - - for _, node := range kindNodes { - shouldHavePolicyRoute := slices.Contains(gatewayNodes, node.Name()) - ginkgo.By("Getting ip rules in node " + node.Name()) - rules, err := iproute.RuleShow("", node.Exec) - framework.ExpectNoError(err) - - ginkgo.By("Checking ip rules in node " + node.Name()) - var found int - for _, rule := range rules { - if rule.Priority == prPriority && - rule.Table == strconv.Itoa(prTable) { - framework.ExpectContainElement(cidrs, fmt.Sprintf("%s/%d", rule.Src, rule.SrcLen)) - found++ - } - } - if !shouldHavePolicyRoute { - framework.ExpectZero(found) - continue - } - framework.ExpectEqual(found, len(gateways)) - - ginkgo.By("Getting ip routes in node " + node.Name()) - routes, err := iproute.RouteShow(strconv.Itoa(prTable), "", node.Exec) - framework.ExpectNoError(err) - - ginkgo.By("Checking ip routes in node " + node.Name()) - framework.ExpectHaveLen(routes, len(gateways)) - nexthops := make([]string, 0, 2) - for _, route := range routes { - framework.ExpectEqual(route.Dst, "default") - nexthops = append(nexthops, route.Gateway) - } - framework.ExpectConsistOf(nexthops, gateways) - } - }) - - framework.ConformanceIt("should support subnet AvailableIPRange and UsingIPRange creating pod no specify ip", func() { - f.SkipVersionPriorTo(1, 12, "Support for display AvailableIPRange and UsingIPRange in v1.12") - podCount = 5 - var startIPv4, startIPv6 string - if firstIPv4 != "" { - startIPv4 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv4), big.NewInt(1))) - } - if firstIPv6 != "" { - startIPv6 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv6), big.NewInt(1))) - } - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod with no specify pod ip") - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Creating pod " + podName) - pod := framework.MakePod("", podName, nil, annotations, "", nil, nil) - podClient.Create(pod) - } - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Waiting pod " + podName + " to be running") - podClient.WaitForRunning(podName) - } - - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet = subnetClient.Get(subnetName) - if cidrV4 != "" { - v4UsingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIPv4), big.NewInt(int64(podCount-1)))) - v4AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(v4UsingIPEnd), big.NewInt(1))) - framework.Logf("V4UsingIPRange: expected %q, current %q", - fmt.Sprintf("%s-%s", startIPv4, v4UsingIPEnd), - subnet.Status.V4UsingIPRange, - ) - framework.Logf("V4AvailableIPRange: expected %q, current %q", - fmt.Sprintf("%s-%s", v4AvailableIPStart, lastIPv4), - subnet.Status.V4AvailableIPRange, - ) - if subnet.Status.V4UsingIPRange != fmt.Sprintf("%s-%s", startIPv4, v4UsingIPEnd) || - subnet.Status.V4AvailableIPRange != fmt.Sprintf("%s-%s", v4AvailableIPStart, lastIPv4) { - return false, nil - } - } - if cidrV6 != "" { - v6UsingIPEnd := 
util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIPv6), big.NewInt(int64(podCount-1)))) - v6AvailableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(v6UsingIPEnd), big.NewInt(1))) - framework.Logf("V6UsingIPRange: expected %q, current %q", - fmt.Sprintf("%s-%s", startIPv6, v6UsingIPEnd), - subnet.Status.V6UsingIPRange, - ) - framework.Logf("V6AvailableIPRange: expected %q, current %q", - fmt.Sprintf("%s-%s", v6AvailableIPStart, lastIPv6), - subnet.Status.V6AvailableIPRange, - ) - if subnet.Status.V6UsingIPRange != fmt.Sprintf("%s-%s", startIPv6, v6UsingIPEnd) || - subnet.Status.V6AvailableIPRange != fmt.Sprintf("%s-%s", v6AvailableIPStart, lastIPv6) { - return false, nil - } - } - return true, nil - }, "") - - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Deleting pod " + podName) - err := podClient.Delete(podName) - framework.ExpectNoError(err) - } - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Waiting pod " + podName + " to be deleted") - podClient.WaitForNotFound(podName) - } - - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet = subnetClient.Get(subnetName) - if cidrV4 != "" { - if subnet.Status.V4UsingIPRange != "" || subnet.Status.V4AvailableIPRange != fmt.Sprintf("%s-%s", startIPv4, lastIPv4) { - return false, nil - } - } - if cidrV6 != "" { - if subnet.Status.V6UsingIPRange != "" || subnet.Status.V6AvailableIPRange != fmt.Sprintf("%s-%s", startIPv6, lastIPv6) { - return false, nil - } - } - return true, nil - }, "") - - if cidrV4 != "" { - framework.ExpectEqual(subnet.Status.V4UsingIPRange, "") - framework.ExpectEqual(subnet.Status.V4AvailableIPRange, fmt.Sprintf("%s-%s", startIPv4, lastIPv4)) - } - - if cidrV6 != "" { - framework.ExpectEqual(subnet.Status.V6UsingIPRange, "") - framework.ExpectEqual(subnet.Status.V6AvailableIPRange, fmt.Sprintf("%s-%s", startIPv6, lastIPv6)) - } - }) - - framework.ConformanceIt("should support subnet AvailableIPRange and UsingIPRange creating pod specify ip", func() { - f.SkipVersionPriorTo(1, 12, "Support for display AvailableIPRange and UsingIPRange in v1.12") - podCount = 5 - var startIPv4, startIPv6, usingIPv4Str, availableIPv4Str, usingIPv6Str, availableIPv6Str string - - if firstIPv4 != "" { - startIPv4 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv4), big.NewInt(1))) - } - if firstIPv6 != "" { - startIPv6 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv6), big.NewInt(1))) - } - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - ginkgo.By("Creating pod with specify pod ip") - podIPv4s, podIPv6s := createPodsByRandomIPs(podClient, subnetClient, subnetName, podNamePrefix, podCount, startIPv4, startIPv6) - subnet = subnetClient.Get(subnetName) - - if podIPv4s != nil { - usingIPv4Str, availableIPv4Str = calcuIPRangeListStr(podIPv4s, startIPv4, lastIPv4) - framework.ExpectEqual(subnet.Status.V4UsingIPRange, usingIPv4Str) - framework.ExpectEqual(subnet.Status.V4AvailableIPRange, availableIPv4Str) - } - - if podIPv6s != nil { - usingIPv6Str, availableIPv6Str = calcuIPRangeListStr(podIPv6s, startIPv6, lastIPv6) - framework.ExpectEqual(subnet.Status.V6UsingIPRange, usingIPv6Str) - framework.ExpectEqual(subnet.Status.V6AvailableIPRange, availableIPv6Str) - } - - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) 
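- // Deleting the pods should release their addresses, letting the subnet status ranges collapse back to the full pool checked below.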
- ginkgo.By("Deleting pod " + podName) - err := podClient.Delete(podName) - framework.ExpectNoError(err) - } - for i := 1; i <= podCount; i++ { - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - ginkgo.By("Waiting pod " + podName + " to be deleted") - podClient.WaitForNotFound(podName) - } - - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet = subnetClient.Get(subnetName) - if cidrV4 != "" { - if subnet.Status.V4UsingIPRange != "" || subnet.Status.V4AvailableIPRange != fmt.Sprintf("%s-%s", startIPv4, lastIPv4) { - return false, nil - } - } - if cidrV6 != "" { - if subnet.Status.V6UsingIPRange != "" || subnet.Status.V6AvailableIPRange != fmt.Sprintf("%s-%s", startIPv6, lastIPv6) { - return false, nil - } - } - return true, nil - }, "") - - if cidrV4 != "" { - framework.ExpectEqual(subnet.Status.V4UsingIPRange, "") - framework.ExpectEqual(subnet.Status.V4AvailableIPRange, fmt.Sprintf("%s-%s", startIPv4, lastIPv4)) - } - - if cidrV6 != "" { - framework.ExpectEqual(subnet.Status.V6UsingIPRange, "") - framework.ExpectEqual(subnet.Status.V6AvailableIPRange, fmt.Sprintf("%s-%s", startIPv6, lastIPv6)) - } - }) - - framework.ConformanceIt("should support subnet AvailableIPRange and UsingIPRange is correct when restart deployment", func() { - f.SkipVersionPriorTo(1, 12, "Support for display AvailableIPRange and UsingIPRange in v1.12") - - var startIPv4, startIPv6 string - if firstIPv4 != "" { - startIPv4 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv4), big.NewInt(1))) - } - if firstIPv6 != "" { - startIPv6 = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv6), big.NewInt(1))) - } - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - deployName = "deployment-" + framework.RandomSuffix() - ginkgo.By("Creating deployment " + deployName) - replicas := int64(5) - labels := map[string]string{"app": deployName} - annotations := map[string]string{util.LogicalSwitchAnnotation: subnetName} - deploy := framework.MakeDeployment(deployName, int32(replicas), labels, annotations, "pause", framework.PauseImage, "") - deploy = deployClient.CreateSync(deploy) - - checkFunc := func(usingIPRange, availableIPRange, startIP, lastIP string, count int64) bool { - ginkgo.GinkgoHelper() - - if startIP == "" { - return true - } - - usingIPEnd := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIP), big.NewInt(count-1))) - availableIPStart := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(usingIPEnd), big.NewInt(1))) - - framework.Logf(`subnet status usingIPRange %q expect "%s-%s"`, usingIPRange, startIP, usingIPEnd) - if usingIPRange != fmt.Sprintf("%s-%s", startIP, usingIPEnd) { - return false - } - framework.Logf(`subnet status availableIPRange %q expect "%s-%s"`, availableIPRange, availableIPStart, lastIP) - return availableIPRange == fmt.Sprintf("%s-%s", availableIPStart, lastIP) - } - - ginkgo.By("Checking subnet status") - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet = subnetClient.Get(subnetName) - if !checkFunc(subnet.Status.V4UsingIPRange, subnet.Status.V4AvailableIPRange, startIPv4, lastIPv4, replicas) { - return false, nil - } - return checkFunc(subnet.Status.V6UsingIPRange, subnet.Status.V6AvailableIPRange, startIPv6, lastIPv6, replicas), nil - }, "") - - ginkgo.By("Restarting deployment " + deployName) - _ = deployClient.RestartSync(deploy) - - checkFunc2 := 
func(usingIPRange, availableIPRange, startIP, lastIP string, count int64) bool { - ginkgo.GinkgoHelper() - - if startIP == "" { - return true - } - - expectAvailIPRangeStr := fmt.Sprintf("%s-%s,%s-%s", - startIP, - util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIP), big.NewInt(count-1))), - util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIP), big.NewInt(2*count))), - lastIP, - ) - expectUsingIPRangeStr := fmt.Sprintf("%s-%s", - util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIP), big.NewInt(count))), - util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(startIP), big.NewInt(2*count-1))), - ) - - framework.Logf("subnet status usingIPRange %q expect %q", usingIPRange, expectUsingIPRangeStr) - if usingIPRange != expectUsingIPRangeStr { - return false - } - framework.Logf("subnet status availableIPRange %q expect %q", availableIPRange, expectAvailIPRangeStr) - return availableIPRange == expectAvailIPRangeStr - } - - ginkgo.By("Checking subnet status") - subnet = subnetClient.Get(subnetName) - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet = subnetClient.Get(subnetName) - if !checkFunc2(subnet.Status.V4UsingIPRange, subnet.Status.V4AvailableIPRange, startIPv4, lastIPv4, replicas) { - return false, nil - } - return checkFunc2(subnet.Status.V6UsingIPRange, subnet.Status.V6AvailableIPRange, startIPv6, lastIPv6, replicas), nil - }, "") - }) - - framework.ConformanceIt("create subnet with enableLb option", func() { - f.SkipVersionPriorTo(1, 12, "Support for enableLb in subnet is introduced in v1.12") - - enableLb := true - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.EnableLb = &enableLb - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Validating subnet finalizers") - f.ValidateFinalizers(subnet) - - ginkgo.By("Validating subnet load-balancer records exist") - cmd := "ovn-nbctl --format=csv --data=bare --no-heading --columns=load_balancer list Logical_Switch " + subnetName - output, _, err := framework.NBExec(cmd) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(strings.TrimSpace(string(output))) - - ginkgo.By("Validating change subnet spec enableLb to false") - enableLb = false - modifiedSubnet := subnet.DeepCopy() - modifiedSubnet.Spec.EnableLb = &enableLb - subnet = subnetClient.PatchSync(subnet, modifiedSubnet) - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - if output, _, err = framework.NBExec(cmd); err != nil { - return false, err - } - if strings.TrimSpace(string(output)) == "" { - return true, nil - } - return false, nil - }, fmt.Sprintf("OVN LB record for subnet %s to be empty", subnet.Name)) - - ginkgo.By("Validating empty subnet spec enableLb field, should keep same value as args enableLb") - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.EnableLb = nil - subnet = subnetClient.PatchSync(subnet, modifiedSubnet) - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - if output, _, err = framework.NBExec(cmd); err != nil { - return false, err - } - if strings.TrimSpace(string(output)) != "" { - return true, nil - } - return false, nil - }, fmt.Sprintf("OVN LB record for subnet %s to sync", subnet.Name)) - }) - - framework.ConformanceIt("should support subnet add gateway event and metrics", func() { - f.SkipVersionPriorTo(1, 12, "Support for subnet add gateway event and metrics is introduced in v1.12") - - ginkgo.By("Creating 
subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Getting nodes") - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - for _, node := range nodes.Items { - ginkgo.By("Checking iptables rules on node " + node.Name + " for subnet " + subnetName) - - if cidrV4 != "" { - expectedRules := []string{ - fmt.Sprintf(`-A %s -d %s -m comment --comment "%s,%s"`, "FORWARD", cidrV4, util.OvnSubnetGatewayIptables, subnetName), - fmt.Sprintf(`-A %s -s %s -m comment --comment "%s,%s"`, "FORWARD", cidrV4, util.OvnSubnetGatewayIptables, subnetName), - } - - iptables.CheckIptablesRulesOnNode(f, node.Name, "filter", "FORWARD", apiv1.ProtocolIPv4, expectedRules, true) - } - if cidrV6 != "" { - expectedRules := []string{ - fmt.Sprintf(`-A %s -d %s -m comment --comment "%s,%s"`, "FORWARD", cidrV6, util.OvnSubnetGatewayIptables, subnetName), - fmt.Sprintf(`-A %s -s %s -m comment --comment "%s,%s"`, "FORWARD", cidrV6, util.OvnSubnetGatewayIptables, subnetName), - } - iptables.CheckIptablesRulesOnNode(f, node.Name, "filter", "FORWARD", apiv1.ProtocolIPv6, expectedRules, true) - } - } - - ginkgo.By("Checking subnet gateway type/node change " + subnetName) - - gatewayNodes := make([]string, 0, len(nodes.Items)) - for i := 0; i < 3 && i < max(1, len(nodes.Items)-1); i++ { - gatewayNodes = append(gatewayNodes, nodes.Items[i].Name) - } - modifiedSubnet := subnet.DeepCopy() - modifiedSubnet.Spec.GatewayType = apiv1.GWCentralizedType - modifiedSubnet.Spec.GatewayNode = strings.Join(gatewayNodes, ",") - - subnet = subnetClient.PatchSync(subnet, modifiedSubnet) - eventClient = f.EventClientNS("default") - events := eventClient.WaitToHaveEvent("Subnet", subnetName, "Normal", "SubnetGatewayTypeChanged", "kube-ovn-controller", "") - - message := fmt.Sprintf("subnet gateway type changes from %q to %q", apiv1.GWDistributedType, apiv1.GWCentralizedType) - found := false - for _, event := range events { - if event.Message == message { - found = true - break - } - } - framework.ExpectTrue(found, "no SubnetGatewayTypeChanged event") - found = false - events = eventClient.WaitToHaveEvent("Subnet", subnetName, "Normal", "SubnetGatewayNodeChanged", "kube-ovn-controller", "") - message = fmt.Sprintf("gateway node changes from %q to %q", "", modifiedSubnet.Spec.GatewayNode) - for _, event := range events { - if event.Message == message { - found = true - break - } - } - framework.ExpectTrue(found, "no SubnetGatewayNodeChanged event") - ginkgo.By("when remove subnet the iptables rules will remove") - subnetClient.DeleteSync(subnetName) - - for _, node := range nodes.Items { - ginkgo.By("Checking iptables rules on node " + node.Name + " for subnet " + subnetName) - if cidrV4 != "" { - expectedRules := []string{ - fmt.Sprintf(`-A %s -d %s -m comment --comment "%s,%s"`, "FORWARD", cidrV4, util.OvnSubnetGatewayIptables, subnetName), - fmt.Sprintf(`-A %s -s %s -m comment --comment "%s,%s"`, "FORWARD", cidrV4, util.OvnSubnetGatewayIptables, subnetName), - } - - iptables.CheckIptablesRulesOnNode(f, node.Name, "filter", "FORWARD", apiv1.ProtocolIPv4, expectedRules, false) - } - if cidrV6 != "" { - expectedRules := []string{ - fmt.Sprintf(`-A %s -d %s -m comment --comment "%s,%s"`, "FORWARD", cidrV6, util.OvnSubnetGatewayIptables, subnetName), - fmt.Sprintf(`-A %s -s %s -m comment --comment "%s,%s"`, "FORWARD", cidrV6, 
util.OvnSubnetGatewayIptables, subnetName), - } - iptables.CheckIptablesRulesOnNode(f, node.Name, "filter", "FORWARD", apiv1.ProtocolIPv6, expectedRules, false) - } - } - }) - - framework.ConformanceIt("should support subnet add nat outgoing policy rules", func() { - f.SkipVersionPriorTo(1, 12, "Support for subnet add nat outgoing policy rules in v1.12") - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - - pod := framework.MakePod(namespaceName, podName, nil, annotations, framework.AgnhostImage, nil, nil) - _ = podClient.CreateSync(pod) - - fakeV4Rules := []apiv1.NatOutgoingPolicyRule{ - { - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: "1.1.1.1", - }, - Action: util.NatPolicyRuleActionForward, - }, - { - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: "1.1.1.1", - DstIPs: "199.255.0.0/16", - }, - Action: util.NatPolicyRuleActionNat, - }, - } - - fakeV6Rules := []apiv1.NatOutgoingPolicyRule{ - { - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: "ff0e::1", - }, - Action: util.NatPolicyRuleActionForward, - }, - { - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: "ff0e::1", - DstIPs: "fd12:3456:789a:bcde::/64", - }, - Action: util.NatPolicyRuleActionNat, - }, - } - - subnet = subnetClient.Get(subnetName) - modifiedSubnet := subnet.DeepCopy() - - rules := make([]apiv1.NatOutgoingPolicyRule, 0, 6) - if cidrV4 != "" { - rule := apiv1.NatOutgoingPolicyRule{ - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: cidrV4, - }, - Action: util.NatPolicyRuleActionForward, - } - rules = append(rules, rule) - rules = append(rules, fakeV4Rules...) - } - - if cidrV6 != "" { - rule := apiv1.NatOutgoingPolicyRule{ - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: cidrV6, - }, - Action: util.NatPolicyRuleActionForward, - } - rules = append(rules, rule) - rules = append(rules, fakeV6Rules...) 
- } - - ginkgo.By("Step1: Creating nat outgoing policy rules for subnet " + subnetName) - modifiedSubnet.Spec.NatOutgoing = true - modifiedSubnet.Spec.NatOutgoingPolicyRules = rules - _ = subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Creating another subnet with the same rules: " + fakeSubnetName) - fakeCidr := framework.RandomCIDR(f.ClusterIPFamily) - fakeCidrV4, fakeCidrV6 := util.SplitStringIP(fakeCidr) - fakeSubnet := framework.MakeSubnet(fakeSubnetName, "", fakeCidr, "", "", "", nil, nil, nil) - fakeSubnet.Spec.NatOutgoingPolicyRules = rules - fakeSubnet.Spec.NatOutgoing = true - _ = subnetClient.CreateSync(fakeSubnet) - - subnet = checkSubnetNatOutgoingPolicyRuleStatus(subnetClient, subnetName, rules) - fakeSubnet = checkSubnetNatOutgoingPolicyRuleStatus(subnetClient, fakeSubnetName, rules) - checkNatPolicyIPsets(f, cs, subnet, cidrV4, cidrV6, true) - checkNatPolicyRules(f, cs, subnet, cidrV4, cidrV6, true) - checkNatPolicyIPsets(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - checkNatPolicyRules(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - - ginkgo.By("Checking accessible to external") - checkAccessExternal(podName, namespaceName, subnet.Spec.Protocol, false) - - ginkgo.By("Step2: Change nat policy rules action to nat") - rules = make([]apiv1.NatOutgoingPolicyRule, 0, 6) - if cidrV4 != "" { - rule := apiv1.NatOutgoingPolicyRule{ - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: cidrV4, - }, - Action: util.NatPolicyRuleActionNat, - } - rules = append(rules, rule) - rules = append(rules, fakeV4Rules...) - } - - if cidrV6 != "" { - rule := apiv1.NatOutgoingPolicyRule{ - Match: apiv1.NatOutGoingPolicyMatch{ - SrcIPs: cidrV6, - }, - Action: util.NatPolicyRuleActionNat, - } - rules = append(rules, rule) - rules = append(rules, fakeV6Rules...) 
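- // The fake rules are reused unchanged; only the subnet-wide rules above switch their action to nat.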
- } - - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.NatOutgoing = true - modifiedSubnet.Spec.NatOutgoingPolicyRules = rules - _ = subnetClient.PatchSync(subnet, modifiedSubnet) - - cachedSubnet := checkSubnetNatOutgoingPolicyRuleStatus(subnetClient, subnetName, rules) - checkNatPolicyIPsets(f, cs, cachedSubnet, cidrV4, cidrV6, true) - checkNatPolicyRules(f, cs, cachedSubnet, cidrV4, cidrV6, true) - checkNatPolicyIPsets(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - checkNatPolicyRules(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - - ginkgo.By("Checking accessible to external") - checkAccessExternal(podName, namespaceName, subnet.Spec.Protocol, true) - - ginkgo.By("Step3: When natoutgoing disable, natoutgoing policy rule not work") - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.NatOutgoing = false - _ = subnetClient.PatchSync(subnet, modifiedSubnet) - - _ = checkSubnetNatOutgoingPolicyRuleStatus(subnetClient, subnetName, nil) - checkNatPolicyRules(f, cs, cachedSubnet, cidrV4, cidrV6, false) - checkNatPolicyIPsets(f, cs, cachedSubnet, cidrV4, cidrV6, false) - checkNatPolicyIPsets(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - checkNatPolicyRules(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - - ginkgo.By("Checking accessible to external") - checkAccessExternal(podName, namespaceName, subnet.Spec.Protocol, false) - - ginkgo.By("Step4: Remove network policy rules") - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.NatOutgoing = true - modifiedSubnet.Spec.NatOutgoingPolicyRules = nil - _ = subnetClient.PatchSync(subnet, modifiedSubnet) - - _ = checkSubnetNatOutgoingPolicyRuleStatus(subnetClient, subnetName, nil) - checkNatPolicyRules(f, cs, cachedSubnet, cidrV4, cidrV6, false) - checkNatPolicyIPsets(f, cs, cachedSubnet, cidrV4, cidrV6, false) - checkNatPolicyIPsets(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - checkNatPolicyRules(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, true) - - ginkgo.By("Checking accessible to external") - checkAccessExternal(podName, namespaceName, subnet.Spec.Protocol, true) - - ginkgo.By("Deleting subnet " + fakeSubnetName) - subnetClient.DeleteSync(fakeSubnetName) - checkNatPolicyRules(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, false) - checkNatPolicyIPsets(f, cs, fakeSubnet, fakeCidrV4, fakeCidrV6, false) - }) - - framework.ConformanceIt("should support customize mtu of all pods in subnet", func() { - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.Mtu = 1600 - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - - pod := framework.MakePod(namespaceName, podName, nil, annotations, framework.AgnhostImage, nil, nil) - _ = podClient.CreateSync(pod) - - ginkgo.By("Validating pod MTU") - links, err := iproute.AddressShow("eth0", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) 
- }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(links, 1, "should get eth0 information") - framework.ExpectEqual(links[0].Mtu, int(subnet.Spec.Mtu)) - }) -}) - -func checkNatPolicyIPsets(f *framework.Framework, cs clientset.Interface, subnet *apiv1.Subnet, cidrV4, cidrV6 string, shouldExist bool) { - ginkgo.GinkgoHelper() - - ginkgo.By(fmt.Sprintf("Checking nat policy rule ipset existed: %v", shouldExist)) - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - for _, node := range nodes.Items { - var expectedIPsets []string - if cidrV4 != "" && shouldExist { - expectedIPsets = append(expectedIPsets, "ovn40subnets-nat-policy") - } - if cidrV6 != "" && shouldExist { - expectedIPsets = append(expectedIPsets, "ovn60subnets-nat-policy") - } - - for _, natPolicyRule := range subnet.Status.NatOutgoingPolicyRules { - protocol := "" - if natPolicyRule.Match.SrcIPs != "" { - protocol = util.CheckProtocol(strings.Split(natPolicyRule.Match.SrcIPs, ",")[0]) - } else if natPolicyRule.Match.DstIPs != "" { - protocol = util.CheckProtocol(strings.Split(natPolicyRule.Match.DstIPs, ",")[0]) - } - - if protocol == apiv1.ProtocolIPv4 { - if natPolicyRule.Match.SrcIPs != "" { - expectedIPsets = append(expectedIPsets, fmt.Sprintf("ovn40natpr-%s-src", natPolicyRule.RuleID)) - } - if natPolicyRule.Match.DstIPs != "" { - expectedIPsets = append(expectedIPsets, fmt.Sprintf("ovn40natpr-%s-dst", natPolicyRule.RuleID)) - } - } - if protocol == apiv1.ProtocolIPv6 { - if natPolicyRule.Match.SrcIPs != "" { - expectedIPsets = append(expectedIPsets, fmt.Sprintf("ovn60natpr-%s-src", natPolicyRule.RuleID)) - } - if natPolicyRule.Match.DstIPs != "" { - expectedIPsets = append(expectedIPsets, fmt.Sprintf("ovn60natpr-%s-dst", natPolicyRule.RuleID)) - } - } - } - checkIPSetOnNode(f, node.Name, expectedIPsets, shouldExist) - } -} - -func checkNatPolicyRules(f *framework.Framework, cs clientset.Interface, subnet *apiv1.Subnet, cidrV4, cidrV6 string, shouldExist bool) { - ginkgo.GinkgoHelper() - - ginkgo.By(fmt.Sprintf("Checking nat policy rule existed: %v", shouldExist)) - nodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodes.Items) - - for _, node := range nodes.Items { - var expectV4Rules, expectV6Rules, staticV4Rules, staticV6Rules []string - if cidrV4 != "" { - staticV4Rules = append(staticV4Rules, "-A OVN-POSTROUTING -m set --match-set ovn40subnets-nat-policy src -m set ! --match-set ovn40subnets dst -j OVN-NAT-POLICY") - expectV4Rules = append(expectV4Rules, fmt.Sprintf("-A OVN-NAT-POLICY -s %s -m comment --comment natPolicySubnet-%s -j OVN-NAT-PSUBNET-%s", cidrV4, subnet.Name, subnet.UID[len(subnet.UID)-12:])) - } - - if cidrV6 != "" { - staticV6Rules = append(staticV6Rules, "-A OVN-POSTROUTING -m set --match-set ovn60subnets-nat-policy src -m set ! 
--match-set ovn60subnets dst -j OVN-NAT-POLICY") - expectV6Rules = append(expectV6Rules, fmt.Sprintf("-A OVN-NAT-POLICY -s %s -m comment --comment natPolicySubnet-%s -j OVN-NAT-PSUBNET-%s", cidrV6, subnet.Name, subnet.UID[len(subnet.UID)-12:])) - } - - for _, natPolicyRule := range subnet.Status.NatOutgoingPolicyRules { - markCode := "" - if natPolicyRule.Action == util.NatPolicyRuleActionNat { - markCode = "0x90001/0x90001" - } else if natPolicyRule.Action == util.NatPolicyRuleActionForward { - markCode = "0x90002/0x90002" - } - - protocol := "" - if natPolicyRule.Match.SrcIPs != "" { - protocol = util.CheckProtocol(strings.Split(natPolicyRule.Match.SrcIPs, ",")[0]) - } else if natPolicyRule.Match.DstIPs != "" { - protocol = util.CheckProtocol(strings.Split(natPolicyRule.Match.DstIPs, ",")[0]) - } - - var rule string - if protocol == apiv1.ProtocolIPv4 { - rule = fmt.Sprintf("-A OVN-NAT-PSUBNET-%s", util.GetTruncatedUID(string(subnet.UID))) - if natPolicyRule.Match.SrcIPs != "" { - rule += fmt.Sprintf(" -m set --match-set ovn40natpr-%s-src src", natPolicyRule.RuleID) - } - if natPolicyRule.Match.DstIPs != "" { - rule += fmt.Sprintf(" -m set --match-set ovn40natpr-%s-dst dst", natPolicyRule.RuleID) - } - rule += fmt.Sprintf(" -j MARK --set-xmark %s", markCode) - expectV4Rules = append(expectV4Rules, rule) - } - if protocol == apiv1.ProtocolIPv6 { - rule = fmt.Sprintf("-A OVN-NAT-PSUBNET-%s", util.GetTruncatedUID(string(subnet.UID))) - if natPolicyRule.Match.SrcIPs != "" { - rule += fmt.Sprintf(" -m set --match-set ovn60natpr-%s-src src", natPolicyRule.RuleID) - } - if natPolicyRule.Match.DstIPs != "" { - rule += fmt.Sprintf(" -m set --match-set ovn60natpr-%s-dst dst", natPolicyRule.RuleID) - } - rule += fmt.Sprintf(" -j MARK --set-xmark %s", markCode) - expectV6Rules = append(expectV6Rules, rule) - } - } - - if cidrV4 != "" { - iptables.CheckIptablesRulesOnNode(f, node.Name, "nat", "", apiv1.ProtocolIPv4, staticV4Rules, true) - iptables.CheckIptablesRulesOnNode(f, node.Name, "nat", "", apiv1.ProtocolIPv4, expectV4Rules, shouldExist) - } - if cidrV6 != "" { - iptables.CheckIptablesRulesOnNode(f, node.Name, "nat", "", apiv1.ProtocolIPv6, staticV6Rules, true) - iptables.CheckIptablesRulesOnNode(f, node.Name, "nat", "", apiv1.ProtocolIPv6, expectV6Rules, shouldExist) - } - } -} - -func checkAccessExternal(podName, podNamespace, protocol string, expectReachable bool) { - ginkgo.GinkgoHelper() - - ginkgo.By("checking external ip reachable") - if protocol == apiv1.ProtocolIPv4 || protocol == apiv1.ProtocolDual { - externalIP := "114.114.114.114" - isv4ExternalIPReachable := func() bool { - cmd := fmt.Sprintf("ping %s -w 1", externalIP) - output, _ := exec.Command("bash", "-c", cmd).CombinedOutput() - outputStr := string(output) - return strings.Contains(outputStr, "1 received") - } - if isv4ExternalIPReachable() { - cmd := fmt.Sprintf("kubectl exec %s -n %s -- nc -vz -w 5 %s 53", podName, podNamespace, externalIP) - output, _ := exec.Command("bash", "-c", cmd).CombinedOutput() - outputStr := string(output) - framework.ExpectEqual(strings.Contains(outputStr, "succeeded"), expectReachable) - } - } - - if protocol == apiv1.ProtocolIPv6 || protocol == apiv1.ProtocolDual { - externalIP := "2400:3200::1" - isv6ExternalIPReachable := func() bool { - cmd := fmt.Sprintf("ping6 %s -w 1", externalIP) - output, _ := exec.Command("bash", "-c", cmd).CombinedOutput() - outputStr := string(output) - return strings.Contains(outputStr, "1 received") - } - - if isv6ExternalIPReachable() { - cmd := 
fmt.Sprintf("kubectl exec %s -n %s -- nc -6 -vz -w 5 %s 53", podName, podNamespace, externalIP) - output, _ := exec.Command("bash", "-c", cmd).CombinedOutput() - outputStr := string(output) - framework.ExpectEqual(strings.Contains(outputStr, "succeeded"), expectReachable) - } - } -} - -func createPodsByRandomIPs(podClient *framework.PodClient, subnetClient *framework.SubnetClient, subnetName, podNamePrefix string, podCount int, startIPv4, startIPv6 string) ([]string, []string) { - var allocIP string - var podIPv4s, podIPv6s []string - podv4IP := startIPv4 - podv6IP := startIPv6 - - subnet := subnetClient.Get(subnetName) - for i := 1; i <= podCount; i++ { - step := rand.Int64()%10 + 2 - switch subnet.Spec.Protocol { - case apiv1.ProtocolIPv4: - podv4IP = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podv4IP), big.NewInt(step))) - allocIP = podv4IP - case apiv1.ProtocolIPv6: - podv6IP = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podv6IP), big.NewInt(step))) - allocIP = podv6IP - default: - podv4IP = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podv4IP), big.NewInt(step))) - podv6IP = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podv6IP), big.NewInt(step))) - allocIP = fmt.Sprintf("%s,%s", podv4IP, podv6IP) - } - - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - fmt.Sprintf(util.IPAddressAnnotationTemplate, subnet.Spec.Provider): allocIP, - } - - podName := fmt.Sprintf("%s-%d", podNamePrefix, i) - pod := framework.MakePod("", podName, nil, annotations, "", nil, nil) - podClient.CreateSync(pod) - - if podv4IP != "" { - podIPv4s = append(podIPv4s, podv4IP) - } - if podv6IP != "" { - podIPv6s = append(podIPv6s, podv6IP) - } - } - - return podIPv4s, podIPv6s -} - -func calcuIPRangeListStr(podIPs []string, startIP, lastIP string) (string, string) { - var usingIPs, availableIPs []string - var usingIPStr, availableIPStr, prePodIP string - - for index, podIP := range podIPs { - usingIPs = append(usingIPs, podIP) - if index == 0 { - availableIPs = append(availableIPs, fmt.Sprintf("%s-%s", startIP, util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podIP), big.NewInt(-1))))) - } else { - preIP := prePodIP - start := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(preIP), big.NewInt(1))) - end := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(podIP), big.NewInt(-1))) - - if start == end { - availableIPs = append(availableIPs, start) - } else { - availableIPs = append(availableIPs, fmt.Sprintf("%s-%s", start, end)) - } - } - prePodIP = podIP - } - - if prePodIP != "" { - availableIPs = append(availableIPs, fmt.Sprintf("%s-%s", util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(prePodIP), big.NewInt(1))), lastIP)) - } - - usingIPStr = strings.Join(usingIPs, ",") - availableIPStr = strings.Join(availableIPs, ",") - framework.Logf("usingIPs is %q", usingIPStr) - framework.Logf("availableIPs is %q", availableIPStr) - return usingIPStr, availableIPStr -} diff --git a/test/e2e/kube-ovn/switch_lb_rule/switch_lb_rule.go b/test/e2e/kube-ovn/switch_lb_rule/switch_lb_rule.go deleted file mode 100644 index e4d51e79fd3..00000000000 --- a/test/e2e/kube-ovn/switch_lb_rule/switch_lb_rule.go +++ /dev/null @@ -1,356 +0,0 @@ -package switch_lb_rule - -import ( - "context" - "fmt" - "strconv" - "time" - - "github.com/onsi/ginkgo/v2" - corev1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - kubeovnv1 
"github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func generateSwitchLBRuleName(ruleName string) string { - return "lr-" + ruleName -} - -func generateServiceName(slrName string) string { - return "slr-" + slrName -} - -func generateVpcName(name string) string { - return "vpc-" + name -} - -func generateSubnetName(name string) string { - return "subnet-" + name -} - -func curlSvc(f *framework.Framework, clientPodName, vip string, port int32) { - ginkgo.GinkgoHelper() - cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s", util.JoinHostPort(vip, port)) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, f.Namespace.Name, clientPodName)) - e2epodoutput.RunHostCmdOrDie(f.Namespace.Name, clientPodName, cmd) -} - -var _ = framework.Describe("[group:slr]", func() { - f := framework.NewDefaultFramework("slr") - - var ( - switchLBRuleClient *framework.SwitchLBRuleClient - endpointsClient *framework.EndpointsClient - serviceClient *framework.ServiceClient - stsClient *framework.StatefulSetClient - podClient *framework.PodClient - subnetClient *framework.SubnetClient - vpcClient *framework.VpcClient - - namespaceName, suffix string - vpcName, subnetName, clientPodName, label string - stsName, stsSvcName string - selSlrName, selSvcName string - epSlrName, epSvcName string - overlaySubnetCidr, vip string - // TODO:// slr support dual-stack - frontPort, selSlrFrontPort, epSlrFrontPort, backendPort int32 - ) - - ginkgo.BeforeEach(func() { - switchLBRuleClient = f.SwitchLBRuleClient() - endpointsClient = f.EndpointClient() - serviceClient = f.ServiceClient() - stsClient = f.StatefulSetClient() - podClient = f.PodClient() - subnetClient = f.SubnetClient() - vpcClient = f.VpcClient() - - suffix = framework.RandomSuffix() - - namespaceName = f.Namespace.Name - selSlrName = "sel-" + generateSwitchLBRuleName(suffix) - selSvcName = generateServiceName(selSlrName) - epSlrName = "ep-" + generateSwitchLBRuleName(suffix) - epSvcName = generateServiceName(epSlrName) - stsName = "sts-" + suffix - stsSvcName = stsName - label = "slr" - clientPodName = "client-" + suffix - subnetName = generateSubnetName(suffix) - vpcName = generateVpcName(suffix) - frontPort = 8090 - selSlrFrontPort = 8091 - epSlrFrontPort = 8092 - backendPort = 80 - vip = "" - overlaySubnetCidr = framework.RandomCIDR(f.ClusterIPFamily) - ginkgo.By("Creating custom vpc") - vpc := framework.MakeVpc(vpcName, "", false, false, []string{namespaceName}) - _ = vpcClient.CreateSync(vpc) - ginkgo.By("Creating custom overlay subnet") - overlaySubnet := framework.MakeSubnet(subnetName, "", overlaySubnetCidr, "", vpcName, "", nil, nil, nil) - _ = subnetClient.CreateSync(overlaySubnet) - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - ginkgo.By("Creating client pod " + clientPodName) - clientPod := framework.MakePod(namespaceName, clientPodName, nil, annotations, framework.AgnhostImage, nil, nil) - podClient.CreateSync(clientPod) - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Deleting client pod " + clientPodName) - podClient.DeleteSync(clientPodName) - ginkgo.By("Deleting statefulset " + stsName) - stsClient.DeleteSync(stsName) - ginkgo.By("Deleting service " + stsSvcName) - serviceClient.DeleteSync(stsSvcName) - ginkgo.By("Deleting switch-lb-rule " + selSlrName) - switchLBRuleClient.DeleteSync(selSlrName) - ginkgo.By("Deleting switch-lb-rule " + epSlrName) - switchLBRuleClient.DeleteSync(epSlrName) - 
ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - ginkgo.By("Deleting vpc " + vpcName) - vpcClient.DeleteSync(vpcName) - }) - - framework.ConformanceIt("should access sts and slr svc ok", func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12") - ginkgo.By("1. Creating sts svc with slr") - var ( - clientPod *corev1.Pod - err error - stsSvc, selSvc, epSvc *corev1.Service - selSlrEps, epSlrEps *corev1.Endpoints - ) - replicas := 1 - labels := map[string]string{"app": label} - ginkgo.By("Creating statefulset " + stsName + " with subnet " + subnetName) - sts := framework.MakeStatefulSet(stsName, stsSvcName, int32(replicas), labels, framework.AgnhostImage) - ginkgo.By("Creating sts " + stsName) - sts.Spec.Template.Annotations = map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - portStr := strconv.Itoa(80) - webServerCmd := []string{"/agnhost", "netexec", "--http-port", portStr} - sts.Spec.Template.Spec.Containers[0].Command = webServerCmd - _ = stsClient.CreateSync(sts) - ginkgo.By("Creating service " + stsSvcName) - ports := []corev1.ServicePort{{ - Name: "http", - Protocol: corev1.ProtocolTCP, - Port: frontPort, - TargetPort: intstr.FromInt32(80), - }} - selector := map[string]string{"app": label} - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - stsSvc = framework.MakeService(stsSvcName, corev1.ServiceTypeClusterIP, annotations, selector, ports, corev1.ServiceAffinityNone) - stsSvc = serviceClient.CreateSync(stsSvc, func(s *corev1.Service) (bool, error) { - return len(s.Spec.ClusterIPs) != 0, nil - }, "cluster ips are not empty") - - ginkgo.By("Waiting for sts service " + stsSvcName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - stsSvc, err = serviceClient.ServiceInterface.Get(context.TODO(), stsSvcName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("service %s is created", stsSvcName)) - framework.ExpectNotNil(stsSvc) - - ginkgo.By("Get client pod " + clientPodName) - clientPod, err = podClient.Get(context.TODO(), clientPodName, metav1.GetOptions{}) - framework.ExpectNil(err) - framework.ExpectNotNil(clientPod) - ginkgo.By("Checking sts service " + stsSvc.Name) - for _, ip := range stsSvc.Spec.ClusterIPs { - curlSvc(f, clientPodName, ip, frontPort) - } - vip = stsSvc.Spec.ClusterIP - - ginkgo.By("2. 
Creating switch-lb-rule with selector with lb front vip " + vip) - ginkgo.By("Creating selector SwitchLBRule " + epSlrName) - var ( - selRule *kubeovnv1.SwitchLBRule - slrSlector []string - slrPorts, epPorts []kubeovnv1.SlrPort - sessionAffinity corev1.ServiceAffinity - ) - sessionAffinity = corev1.ServiceAffinityNone - slrPorts = []kubeovnv1.SlrPort{ - { - Name: "http", - Port: selSlrFrontPort, - TargetPort: backendPort, - Protocol: "TCP", - }, - } - slrSlector = []string{fmt.Sprintf("app:%s", label)} - selRule = framework.MakeSwitchLBRule(selSlrName, namespaceName, vip, sessionAffinity, nil, slrSlector, nil, slrPorts) - _ = switchLBRuleClient.Create(selRule) - - ginkgo.By("Waiting for switch-lb-rule " + selSlrName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - _, err = switchLBRuleClient.SwitchLBRuleInterface.Get(context.TODO(), selSlrName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("switch-lb-rule %s is created", selSlrName)) - - ginkgo.By("Waiting for headless service " + selSvcName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - selSvc, err = serviceClient.ServiceInterface.Get(context.TODO(), selSvcName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("service %s is created", selSvcName)) - framework.ExpectNotNil(selSvc) - - ginkgo.By("Waiting for endpoints " + selSvcName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - selSlrEps, err = endpointsClient.EndpointsInterface.Get(context.TODO(), selSvcName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("endpoints %s is created", selSvcName)) - framework.ExpectNotNil(selSlrEps) - - pods := stsClient.GetPods(sts) - framework.ExpectHaveLen(pods.Items, replicas) - - for i, subset := range selSlrEps.Subsets { - var ( - ips []string - tps []int32 - protocols = make(map[int32]string) - ) - - ginkgo.By("Checking endpoint address") - for _, address := range subset.Addresses { - ips = append(ips, address.IP) - } - framework.ExpectContainElement(ips, pods.Items[i].Status.PodIP) - - ginkgo.By("Checking endpoint ports") - for _, port := range subset.Ports { - tps = append(tps, port.Port) - protocols[port.Port] = string(port.Protocol) - } - for _, port := range slrPorts { - framework.ExpectContainElement(tps, port.TargetPort) - framework.ExpectHaveKeyWithValue(protocols, port.TargetPort, port.Protocol) - } - } - - ginkgo.By("Checking selector switch lb service " + selSvc.Name) - curlSvc(f, clientPodName, vip, selSlrFrontPort) - - ginkgo.By("3. 
Creating switch-lb-rule with endpoints with lb front vip " + vip) - ginkgo.By("Creating endpoint SwitchLBRule " + epSlrName) - sessionAffinity = corev1.ServiceAffinityClientIP - epPorts = []kubeovnv1.SlrPort{ - { - Name: "http", - Port: epSlrFrontPort, - TargetPort: backendPort, - Protocol: "TCP", - }, - } - presetEndpoints := []string{} - for _, pod := range pods.Items { - presetEndpoints = append(presetEndpoints, pod.Status.PodIP) - } - epRule := framework.MakeSwitchLBRule(epSlrName, namespaceName, vip, sessionAffinity, annotations, nil, presetEndpoints, epPorts) - _ = switchLBRuleClient.Create(epRule) - - ginkgo.By("Waiting for switch-lb-rule " + epSlrName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - _, err := switchLBRuleClient.SwitchLBRuleInterface.Get(context.TODO(), epSlrName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("switch-lb-rule %s is created", epSlrName)) - - ginkgo.By("Waiting for headless service " + epSvcName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - epSvc, err = serviceClient.ServiceInterface.Get(context.TODO(), epSvcName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("service %s is created", epSvcName)) - framework.ExpectNotNil(epSvc) - - ginkgo.By("Waiting for endpoints " + epSvcName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - epSlrEps, err = endpointsClient.EndpointsInterface.Get(context.TODO(), epSvcName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("endpoints %s is created", epSvcName)) - framework.ExpectNotNil(epSlrEps) - - for i, subset := range epSlrEps.Subsets { - var ( - ips []string - tps []int32 - protocols = make(map[int32]string) - ) - - ginkgo.By("Checking endpoint address") - for _, address := range subset.Addresses { - ips = append(ips, address.IP) - } - framework.ExpectContainElement(ips, pods.Items[i].Status.PodIP) - - ginkgo.By("Checking endpoint ports") - for _, port := range subset.Ports { - tps = append(tps, port.Port) - protocols[port.Port] = string(port.Protocol) - } - for _, port := range epPorts { - framework.ExpectContainElement(tps, port.TargetPort) - framework.ExpectHaveKeyWithValue(protocols, port.TargetPort, port.Protocol) - } - } - ginkgo.By("Checking endpoint switch lb service " + epSvc.Name) - curlSvc(f, clientPodName, vip, epSlrFrontPort) - }) -}) diff --git a/test/e2e/kube-ovn/underlay/underlay.go b/test/e2e/kube-ovn/underlay/underlay.go deleted file mode 100644 index 5a2573deb73..00000000000 --- a/test/e2e/kube-ovn/underlay/underlay.go +++ /dev/null @@ -1,1000 +0,0 @@ -package underlay - -import ( - "context" - "fmt" - "net" - "strconv" - "strings" - "time" - - dockernetwork "github.com/docker/docker/api/types/network" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/ipam" - 
"github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -const ( - dockerNetworkName = "kube-ovn-vlan" - curlListenPort = 8081 -) - -func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, linkMap map[string]*iproute.Link) *apiv1.ProviderNetwork { - var defaultInterface string - customInterfaces := make(map[string][]string, 0) - for node, link := range linkMap { - if !strings.ContainsRune(node, '-') { - continue - } - - if defaultInterface == "" { - defaultInterface = link.IfName - } else if link.IfName != defaultInterface { - customInterfaces[link.IfName] = append(customInterfaces[link.IfName], node) - } - } - - return framework.MakeProviderNetwork(providerNetworkName, exchangeLinkName, defaultInterface, customInterfaces, nil) -} - -func waitSubnetStatusUpdate(subnetName string, subnetClient *framework.SubnetClient, expectedUsingIPs float64) { - ginkgo.GinkgoHelper() - - ginkgo.By("Waiting for using ips count of subnet " + subnetName + " to be " + fmt.Sprintf("%.0f", expectedUsingIPs)) - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - subnet := subnetClient.Get(subnetName) - if (subnet.Status.V4AvailableIPs != 0 && subnet.Status.V4UsingIPs != expectedUsingIPs) || - (subnet.Status.V6AvailableIPs != 0 && subnet.Status.V6UsingIPs != expectedUsingIPs) { - framework.Logf("current subnet status: v4AvailableIPs = %.0f, v4UsingIPs = %.0f, v6AvailableIPs = %.0f, v6UsingIPs = %.0f", - subnet.Status.V4AvailableIPs, subnet.Status.V4UsingIPs, subnet.Status.V6AvailableIPs, subnet.Status.V6UsingIPs) - return false, nil - } - return true, nil - }, "") -} - -func waitSubnetU2OStatus(subnetName string, subnetClient *framework.SubnetClient, enableU2O bool) { - ginkgo.GinkgoHelper() - - framework.WaitUntil(1*time.Second, 3*time.Second, func(_ context.Context) (bool, error) { - ginkgo.By("Waiting for U2OInterconnection status of subnet " + subnetName + " to be " + strconv.FormatBool(enableU2O)) - subnet := subnetClient.Get(subnetName) - if enableU2O { - if subnet.Status.U2OInterconnectionIP != "" && subnet.Status.U2OInterconnectionVPC != "" { - framework.Logf("current enable U2O subnet status: U2OInterconnectionIP = %s, U2OInterconnectionVPC = %s", - subnet.Status.U2OInterconnectionIP, subnet.Status.U2OInterconnectionVPC) - return true, nil - } - ginkgo.By("Keep waiting for U2O to be true: current enable U2O subnet status: U2OInterconnectionIP = " + subnet.Status.U2OInterconnectionIP + ", U2OInterconnectionVPC = " + subnet.Status.U2OInterconnectionVPC) - } else if subnet.Status.U2OInterconnectionIP == "" && subnet.Status.U2OInterconnectionVPC == "" { - return true, nil - } - return false, nil - }, "") -} - -var _ = framework.SerialDescribe("[group:underlay]", func() { - f := framework.NewDefaultFramework("underlay") - - var skip bool - var itFn func(bool) - var cs clientset.Interface - var nodeNames []string - var clusterName, providerNetworkName, vlanName, subnetName, podName, namespaceName string - var vpcName string - var u2oPodNameUnderlay, u2oOverlaySubnetName, u2oPodNameOverlay, u2oOverlaySubnetNameCustomVPC, u2oPodOverlayCustomVPC string - var linkMap map[string]*iproute.Link - var routeMap map[string][]iproute.Route - var eventClient *framework.EventClient - var podClient *framework.PodClient - var subnetClient 
*framework.SubnetClient - var vpcClient *framework.VpcClient - var vlanClient *framework.VlanClient - var providerNetworkClient *framework.ProviderNetworkClient - var dockerNetwork *dockernetwork.Inspect - var containerID string - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - eventClient = f.EventClient() - podClient = f.PodClient() - subnetClient = f.SubnetClient() - vpcClient = f.VpcClient() - vlanClient = f.VlanClient() - providerNetworkClient = f.ProviderNetworkClient() - namespaceName = f.Namespace.Name - podName = "pod-" + framework.RandomSuffix() - u2oPodNameOverlay = "pod-" + framework.RandomSuffix() - u2oPodNameUnderlay = "pod-" + framework.RandomSuffix() - u2oPodOverlayCustomVPC = "pod-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - u2oOverlaySubnetName = "subnet-" + framework.RandomSuffix() - u2oOverlaySubnetNameCustomVPC = "subnet-" + framework.RandomSuffix() - vlanName = "vlan-" + framework.RandomSuffix() - providerNetworkName = "pn-" + framework.RandomSuffix() - vpcName = "vpc-" + framework.RandomSuffix() - containerID = "" - - if skip { - ginkgo.Skip("underlay spec only runs on kind clusters") - } - - if clusterName == "" { - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) - if !ok { - skip = true - ginkgo.Skip("underlay spec only runs on kind clusters") - } - clusterName = cluster - } - - if dockerNetwork == nil { - ginkgo.By("Ensuring docker network " + dockerNetworkName + " exists") - network, err := docker.NetworkCreate(dockerNetworkName, true, true) - framework.ExpectNoError(err, "creating docker network "+dockerNetworkName) - dockerNetwork = network - } - - ginkgo.By("Getting kind nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - framework.ExpectNotEmpty(nodes) - - ginkgo.By("Connecting nodes to the docker network") - err = kind.NetworkConnect(dockerNetwork.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to network "+dockerNetworkName) - - ginkgo.By("Getting node links that belong to the docker network") - nodes, err = kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - linkMap = make(map[string]*iproute.Link, len(nodes)) - routeMap = make(map[string][]iproute.Route, len(nodes)) - nodeNames = make([]string, 0, len(nodes)) - for _, node := range nodes { - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - - routes, err := node.ListRoutes(true) - framework.ExpectNoError(err, "failed to list routes on node %s: %v", node.Name(), err) - - for _, link := range links { - if link.Address == node.NetworkSettings.Networks[dockerNetworkName].MacAddress { - linkMap[node.ID] = &link - break - } - } - framework.ExpectHaveKey(linkMap, node.ID) - - link := linkMap[node.ID] - for _, route := range routes { - if route.Dev == link.IfName { - r := iproute.Route{ - Dst: route.Dst, - Gateway: route.Gateway, - Dev: route.Dev, - Flags: route.Flags, - } - routeMap[node.ID] = append(routeMap[node.ID], r) - } - } - framework.ExpectHaveKey(linkMap, node.ID) - - linkMap[node.Name()] = linkMap[node.ID] - routeMap[node.Name()] = routeMap[node.ID] - nodeNames = append(nodeNames, node.Name()) - } - - itFn = func(exchangeLinkName bool) { - ginkgo.GinkgoHelper() - - ginkgo.By("Creating provider network " + 
providerNetworkName) - pn := makeProviderNetwork(providerNetworkName, exchangeLinkName, linkMap) - pn = providerNetworkClient.CreateSync(pn) - - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - ginkgo.By("Validating node labels") - for _, node := range k8sNodes.Items { - link := linkMap[node.Name] - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkInterfaceTemplate, providerNetworkName), link.IfName) - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkReadyTemplate, providerNetworkName), "true") - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkMtuTemplate, providerNetworkName), strconv.Itoa(link.Mtu)) - framework.ExpectNotHaveKey(node.Labels, fmt.Sprintf(util.ProviderNetworkExcludeTemplate, providerNetworkName)) - } - - ginkgo.By("Validating provider network status") - framework.ExpectEqual(pn.Status.Ready, true, "field .status.ready should be true") - framework.ExpectConsistOf(pn.Status.ReadyNodes, nodeNames) - framework.ExpectEmpty(pn.Status.Vlans) - - ginkgo.By("Getting kind nodes") - kindNodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err) - - ginkgo.By("Validating node links") - linkNameMap := make(map[string]string, len(kindNodes)) - bridgeName := util.ExternalBridgeName(providerNetworkName) - for _, node := range kindNodes { - if exchangeLinkName { - bridgeName = linkMap[node.ID].IfName - } - - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - - var port, bridge *iproute.Link - for i, link := range links { - if link.IfIndex == linkMap[node.ID].IfIndex { - port = &links[i] - } else if link.IfName == bridgeName { - bridge = &links[i] - } - if port != nil && bridge != nil { - break - } - } - framework.ExpectNotNil(port) - framework.ExpectEqual(port.Address, linkMap[node.ID].Address) - framework.ExpectEqual(port.Mtu, linkMap[node.ID].Mtu) - framework.ExpectEqual(port.Master, "ovs-system") - framework.ExpectEqual(port.OperState, "UP") - if exchangeLinkName { - framework.ExpectEqual(port.IfName, util.ExternalBridgeName(providerNetworkName)) - } - - framework.ExpectNotNil(bridge) - framework.ExpectEqual(bridge.LinkInfo.InfoKind, "openvswitch") - framework.ExpectEqual(bridge.Address, port.Address) - framework.ExpectEqual(bridge.Mtu, port.Mtu) - framework.ExpectEqual(bridge.OperState, "UNKNOWN") - framework.ExpectContainElement(bridge.Flags, "UP") - - framework.ExpectEmpty(port.NonLinkLocalAddresses()) - framework.ExpectConsistOf(bridge.NonLinkLocalAddresses(), linkMap[node.ID].NonLinkLocalAddresses()) - - linkNameMap[node.ID] = port.IfName - } - - ginkgo.By("Validating node routes") - for _, node := range kindNodes { - if exchangeLinkName { - bridgeName = linkMap[node.ID].IfName - } - - routes, err := node.ListRoutes(true) - framework.ExpectNoError(err, "failed to list routes on node %s: %v", node.Name(), err) - - var portRoutes, bridgeRoutes []iproute.Route - for _, route := range routes { - r := iproute.Route{ - Dst: route.Dst, - Gateway: route.Gateway, - Dev: route.Dev, - Flags: route.Flags, - } - if route.Dev == linkNameMap[node.ID] { - portRoutes = append(portRoutes, r) - } else if route.Dev == bridgeName { - r.Dev = linkMap[node.ID].IfName - bridgeRoutes = append(bridgeRoutes, r) - } - } - - framework.ExpectEmpty(portRoutes, "no routes should exists on provider link") - framework.ExpectConsistOf(bridgeRoutes, 
routeMap[node.ID]) - } - } - }) - ginkgo.AfterEach(func() { - if containerID != "" { - ginkgo.By("Deleting container " + containerID) - err := docker.ContainerRemove(containerID) - framework.ExpectNoError(err) - } - - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Deleting pod " + u2oPodNameOverlay) - podClient.DeleteSync(u2oPodNameOverlay) - - ginkgo.By("Deleting pod " + u2oPodOverlayCustomVPC) - podClient.DeleteSync(u2oPodOverlayCustomVPC) - - ginkgo.By("Deleting subnet " + u2oOverlaySubnetNameCustomVPC) - subnetClient.DeleteSync(u2oOverlaySubnetNameCustomVPC) - - ginkgo.By("Deleting subnet " + u2oOverlaySubnetName) - subnetClient.DeleteSync(u2oOverlaySubnetName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - - ginkgo.By("Deleting vpc " + vpcName) - vpcClient.DeleteSync(vpcName) - - ginkgo.By("Deleting vlan " + vlanName) - vlanClient.Delete(vlanName, metav1.DeleteOptions{}) - - ginkgo.By("Deleting provider network " + providerNetworkName) - providerNetworkClient.DeleteSync(providerNetworkName) - - ginkgo.By("Getting nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in cluster") - - ginkgo.By("Waiting for ovs bridge to disappear") - deadline := time.Now().Add(time.Minute) - for _, node := range nodes { - err = node.WaitLinkToDisappear(util.ExternalBridgeName(providerNetworkName), 2*time.Second, deadline) - framework.ExpectNoError(err, "timed out waiting for ovs bridge to disappear in node %s", node.Name()) - } - - if dockerNetwork != nil { - ginkgo.By("Disconnecting nodes from the docker network") - err = kind.NetworkDisconnect(dockerNetwork.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from network "+dockerNetworkName) - } - }) - - framework.ConformanceIt(`should be able to create provider network`, func() { - itFn(false) - }) - - framework.ConformanceIt(`should exchange link names`, func() { - f.SkipVersionPriorTo(1, 9, "Support for exchanging link names was introduced in v1.9") - - itFn(true) - }) - - framework.ConformanceIt("should keep pod mtu the same with node interface", func() { - ginkgo.By("Creating provider network " + providerNetworkName) - pn := makeProviderNetwork(providerNetworkName, false, linkMap) - _ = providerNetworkClient.CreateSync(pn) - - ginkgo.By("Getting docker network " + dockerNetworkName) - network, err := docker.NetworkInspect(dockerNetworkName) - framework.ExpectNoError(err, "getting docker network "+dockerNetworkName) - - ginkgo.By("Creating vlan " + vlanName) - vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) - _ = vlanClient.Create(vlan) - - ginkgo.By("Creating subnet " + subnetName) - var cidrV4, cidrV6, gatewayV4, gatewayV6 string - for _, config := range dockerNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if f.HasIPv4() { - cidrV4 = config.Subnet - gatewayV4 = config.Gateway - } - case apiv1.ProtocolIPv6: - if f.HasIPv6() { - cidrV6 = config.Subnet - gatewayV6 = config.Gateway - } - } - } - cidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - if f.HasIPv4() { - cidr = append(cidr, cidrV4) - gateway = append(gateway, gatewayV4) - } - if f.HasIPv6() { - cidr = append(cidr, cidrV6) - gateway = append(gateway, gatewayV6) - } - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if 
container.IPv4Address != "" && f.HasIPv4() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.HasIPv6() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) - } - } - subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(cidr, ","), strings.Join(gateway, ","), "", "", excludeIPs, nil, []string{namespaceName}) - _ = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep 600"} - pod := framework.MakePod(namespaceName, podName, nil, nil, f.KubeOVNImage, cmd, nil) - _ = podClient.CreateSync(pod) - - ginkgo.By("Validating pod MTU") - links, err := iproute.AddressShow("eth0", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(links, 1, "should get eth0 information") - framework.ExpectEqual(links[0].Mtu, docker.MTU) - }) - - framework.ConformanceIt("should be able to detect IPv4 address conflict", func() { - if !f.IsIPv4() { - ginkgo.Skip("Address conflict detection only supports IPv4") - } - f.SkipVersionPriorTo(1, 9, "Address conflict detection was introduced in v1.9") - - ginkgo.By("Creating provider network " + providerNetworkName) - pn := makeProviderNetwork(providerNetworkName, false, linkMap) - _ = providerNetworkClient.CreateSync(pn) - - ginkgo.By("Getting docker network " + dockerNetworkName) - network, err := docker.NetworkInspect(dockerNetworkName) - framework.ExpectNoError(err, "getting docker network "+dockerNetworkName) - - containerName := "container-" + framework.RandomSuffix() - ginkgo.By("Creating container " + containerName) - cmd := []string{"sh", "-c", "sleep 600"} - containerInfo, err := docker.ContainerCreate(containerName, f.KubeOVNImage, dockerNetworkName, cmd) - framework.ExpectNoError(err) - containerID = containerInfo.ID - - ginkgo.By("Creating vlan " + vlanName) - vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) - _ = vlanClient.Create(vlan) - - ginkgo.By("Creating subnet " + subnetName) - cidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - for _, config := range dockerNetwork.IPAM.Config { - if util.CheckProtocol(config.Subnet) == apiv1.ProtocolIPv4 { - cidr = append(cidr, config.Subnet) - gateway = append(gateway, config.Gateway) - break - } - } - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if container.IPv4Address != "" { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - } - subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(cidr, ","), strings.Join(gateway, ","), "", "", excludeIPs, nil, []string{namespaceName}) - _ = subnetClient.CreateSync(subnet) - - ip := containerInfo.NetworkSettings.Networks[dockerNetworkName].IPAddress - mac := containerInfo.NetworkSettings.Networks[dockerNetworkName].MacAddress - ginkgo.By("Creating pod " + podName + " with IP address " + ip) - annotations := map[string]string{util.IPAddressAnnotation: ip} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod.Spec.TerminationGracePeriodSeconds = nil - _ = podClient.Create(pod) - - ginkgo.By("Waiting for pod events") - events := eventClient.WaitToHaveEvent("Pod", podName, "Warning", "FailedCreatePodSandBox", "kubelet", "") - message := fmt.Sprintf("IP address %s has already been used by host with MAC %s", ip, mac) - var 
found bool - for _, event := range events { - if strings.Contains(event.Message, message) { - found = true - framework.Logf("Found pod event: %s", event.Message) - break - } - } - framework.ExpectTrue(found, "Address conflict should be reported in pod events") - }) - - framework.ConformanceIt("should support underlay to overlay subnet interconnection", func() { - f.SkipVersionPriorTo(1, 9, "This feature was introduced in v1.9") - - ginkgo.By("Creating provider network " + providerNetworkName) - pn := makeProviderNetwork(providerNetworkName, false, linkMap) - _ = providerNetworkClient.CreateSync(pn) - - ginkgo.By("Getting docker network " + dockerNetworkName) - network, err := docker.NetworkInspect(dockerNetworkName) - framework.ExpectNoError(err, "getting docker network "+dockerNetworkName) - - ginkgo.By("Creating vlan " + vlanName) - vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) - _ = vlanClient.Create(vlan) - - ginkgo.By("Creating underlay subnet " + subnetName) - var cidrV4, cidrV6, gatewayV4, gatewayV6 string - for _, config := range dockerNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case apiv1.ProtocolIPv4: - if f.HasIPv4() { - cidrV4 = config.Subnet - gatewayV4 = config.Gateway - } - case apiv1.ProtocolIPv6: - if f.HasIPv6() { - cidrV6 = config.Subnet - gatewayV6 = config.Gateway - } - } - } - underlayCidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - if f.HasIPv4() { - underlayCidr = append(underlayCidr, cidrV4) - gateway = append(gateway, gatewayV4) - } - if f.HasIPv6() { - underlayCidr = append(underlayCidr, cidrV6) - gateway = append(gateway, gatewayV6) - } - - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if container.IPv4Address != "" && f.HasIPv4() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.HasIPv6() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) - } - } - - ginkgo.By("Creating underlay subnet " + subnetName) - subnet := framework.MakeSubnet(subnetName, vlanName, strings.Join(underlayCidr, ","), strings.Join(gateway, ","), "", "", excludeIPs, nil, []string{namespaceName}) - subnet.Spec.U2OInterconnection = true - // only ipv4 needs to verify that the gateway address is consistent with U2OInterconnectionIP when enabling DHCP and U2O - if f.HasIPv4() { - subnet.Spec.EnableDHCP = true - } - _ = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - annotations := map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - } - args := []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)} - originUnderlayPod := framework.MakePod(namespaceName, u2oPodNameUnderlay, nil, annotations, framework.AgnhostImage, nil, args) - underlayPod := podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 2) - - ginkgo.By("Creating overlay subnet " + u2oOverlaySubnetName) - cidr := framework.RandomCIDR(f.ClusterIPFamily) - overlaySubnet := framework.MakeSubnet(u2oOverlaySubnetName, "", cidr, "", "", "", nil, nil, nil) - overlaySubnet = subnetClient.CreateSync(overlaySubnet) - - ginkgo.By("Creating overlay pod " + u2oPodNameOverlay) - overlayAnnotations := map[string]string{ - util.LogicalSwitchAnnotation: overlaySubnet.Name, - } - args = []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)} - overlayPod := framework.MakePod(namespaceName, u2oPodNameOverlay, nil, 
overlayAnnotations, framework.AgnhostImage, nil, args)
-		overlayPod = podClient.CreateSync(overlayPod)
-
-		ginkgo.By("step1: Enable u2o check")
-		subnet = subnetClient.Get(subnetName)
-		ginkgo.By("1. waiting for U2OInterconnection status of subnet " + subnetName + " to be true")
-		waitSubnetU2OStatus(subnetName, subnetClient, true)
-		checkU2OItems(f, subnet, underlayPod, overlayPod, false)
-
-		ginkgo.By("step2: Disable u2o check")
-
-		ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay)
-		podClient.DeleteSync(u2oPodNameUnderlay)
-
-		ginkgo.By("Turning off U2OInterconnection of subnet " + subnetName)
-		subnet = subnetClient.Get(subnetName)
-		modifiedSubnet := subnet.DeepCopy()
-		modifiedSubnet.Spec.U2OInterconnection = false
-		subnetClient.PatchSync(subnet, modifiedSubnet)
-
-		ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay)
-		underlayPod = podClient.CreateSync(originUnderlayPod)
-		waitSubnetStatusUpdate(subnetName, subnetClient, 1)
-
-		subnet = subnetClient.Get(subnetName)
-		ginkgo.By("2. waiting for U2OInterconnection status of subnet " + subnetName + " to be false")
-		waitSubnetU2OStatus(subnetName, subnetClient, false)
-		checkU2OItems(f, subnet, underlayPod, overlayPod, false)
-
-		ginkgo.By("step3: Recover enable u2o check")
-
-		ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay)
-		podClient.DeleteSync(u2oPodNameUnderlay)
-
-		ginkgo.By("Turning on U2OInterconnection of subnet " + subnetName)
-		subnet = subnetClient.Get(subnetName)
-		modifiedSubnet = subnet.DeepCopy()
-		modifiedSubnet.Spec.U2OInterconnection = true
-		subnetClient.PatchSync(subnet, modifiedSubnet)
-
-		ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay)
-		underlayPod = podClient.CreateSync(originUnderlayPod)
-		waitSubnetStatusUpdate(subnetName, subnetClient, 2)
-
-		subnet = subnetClient.Get(subnetName)
-		ginkgo.By("3. waiting for U2OInterconnection status of subnet " + subnetName + " to be true")
-		waitSubnetU2OStatus(subnetName, subnetClient, true)
-		checkU2OItems(f, subnet, underlayPod, overlayPod, false)
-
-		ginkgo.By("step4: Check u2o after kube-ovn-controller restarts")
-
-		ginkgo.By("Restarting kube-ovn-controller")
-		deployClient := f.DeploymentClientNS(framework.KubeOvnNamespace)
-		deploy := deployClient.Get("kube-ovn-controller")
-		deployClient.RestartSync(deploy)
-
-		subnet = subnetClient.Get(subnetName)
-		ginkgo.By("4. waiting for U2OInterconnection status of subnet " + subnetName + " to be true")
-		waitSubnetU2OStatus(subnetName, subnetClient, true)
-		checkU2OItems(f, subnet, underlayPod, overlayPod, false)
-
-		ginkgo.By("step5: Disable u2o check after restarting kube-ovn-controller")
-
-		ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay)
-		podClient.DeleteSync(u2oPodNameUnderlay)
-
-		ginkgo.By("Turning off U2OInterconnection of subnet " + subnetName)
-		subnet = subnetClient.Get(subnetName)
-		modifiedSubnet = subnet.DeepCopy()
-		modifiedSubnet.Spec.U2OInterconnection = false
-		subnetClient.PatchSync(subnet, modifiedSubnet)
-
-		ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay)
-		underlayPod = podClient.CreateSync(originUnderlayPod)
-		waitSubnetStatusUpdate(subnetName, subnetClient, 1)
-
-		subnet = subnetClient.Get(subnetName)
-		ginkgo.By("5. 
waiting for U2OInterconnection status of subnet " + subnetName + " to be false") - waitSubnetU2OStatus(subnetName, subnetClient, false) - checkU2OItems(f, subnet, underlayPod, overlayPod, false) - - ginkgo.By("step6: Recover enable u2o check after restart kube-ovn-controller") - - ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Turning on U2OInterconnection of subnet " + subnetName) - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.U2OInterconnection = true - subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - underlayPod = podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 2) - - subnet = subnetClient.Get(subnetName) - ginkgo.By("6. waiting for U2OInterconnection status of subnet " + subnetName + " to be true") - waitSubnetU2OStatus(subnetName, subnetClient, true) - checkU2OItems(f, subnet, underlayPod, overlayPod, false) - - if f.VersionPriorTo(1, 9) { - return - } - - ginkgo.By("step7: Specify u2oInterconnectionIP") - - // change u2o interconnection ip twice - for index := 0; index < 2; index++ { - getAvailableIPs := func(subnet *apiv1.Subnet) string { - var availIPs []string - v4Cidr, v6Cidr := util.SplitStringIP(subnet.Spec.CIDRBlock) - if v4Cidr != "" { - startIP := strings.Split(v4Cidr, "/")[0] - ip, _ := ipam.NewIP(startIP) - availIPs = append(availIPs, ip.Add(100+int64(index)).String()) - } - if v6Cidr != "" { - startIP := strings.Split(v6Cidr, "/")[0] - ip, _ := ipam.NewIP(startIP) - availIPs = append(availIPs, ip.Add(100+int64(index)).String()) - } - return strings.Join(availIPs, ",") - } - - subnet = subnetClient.Get(subnetName) - u2oIP := getAvailableIPs(subnet) - ginkgo.By("Setting U2OInterconnectionIP to " + u2oIP + " for subnet " + subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.U2OInterconnectionIP = u2oIP - modifiedSubnet.Spec.U2OInterconnection = true - subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - underlayPod = podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 2) - - subnet = subnetClient.Get(subnetName) - ginkgo.By("7. 
waiting for U2OInterconnection status of subnet " + subnetName + " to be true") - waitSubnetU2OStatus(subnetName, subnetClient, true) - checkU2OItems(f, subnet, underlayPod, overlayPod, false) - } - - if f.VersionPriorTo(1, 11) { - return - } - - ginkgo.By("step8: Change underlay subnet interconnection to overlay subnet in custom vpc") - - ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Creating VPC " + vpcName) - customVPC := framework.MakeVpc(vpcName, "", false, false, []string{namespaceName}) - vpcClient.CreateSync(customVPC) - - ginkgo.By("Creating subnet " + u2oOverlaySubnetNameCustomVPC) - cidr = framework.RandomCIDR(f.ClusterIPFamily) - overlaySubnetCustomVpc := framework.MakeSubnet(u2oOverlaySubnetNameCustomVPC, "", cidr, "", vpcName, "", nil, nil, []string{namespaceName}) - _ = subnetClient.CreateSync(overlaySubnetCustomVpc) - - ginkgo.By("Creating overlay pod " + u2oPodOverlayCustomVPC) - args = []string{"netexec", "--http-port", strconv.Itoa(curlListenPort)} - u2oPodOverlayCustomVPCAnnotations := map[string]string{ - util.LogicalSwitchAnnotation: u2oOverlaySubnetNameCustomVPC, - } - podOverlayCustomVPC := framework.MakePod(namespaceName, u2oPodOverlayCustomVPC, nil, u2oPodOverlayCustomVPCAnnotations, framework.AgnhostImage, nil, args) - podOverlayCustomVPC = podClient.CreateSync(podOverlayCustomVPC) - - ginkgo.By("Turning on U2OInterconnection and set VPC to " + vpcName + " for subnet " + subnetName) - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.Vpc = vpcName - modifiedSubnet.Spec.U2OInterconnection = true - subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - underlayPod = podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 2) - - subnet = subnetClient.Get(subnetName) - ginkgo.By("8. waiting for U2OInterconnection status of subnet " + subnetName + " to be true") - waitSubnetU2OStatus(subnetName, subnetClient, true) - checkU2OItems(f, subnet, underlayPod, podOverlayCustomVPC, true) - - ginkgo.By("step9: Change underlay subnet interconnection to overlay subnet in default vpc") - - ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Setting VPC to " + util.DefaultVpc + " for subnet " + subnetName) - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.Vpc = util.DefaultVpc - modifiedSubnet.Spec.Namespaces = nil - subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - underlayPod = podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 2) - - subnet = subnetClient.Get(subnetName) - ginkgo.By("9. 
waiting for U2OInterconnection status of subnet " + subnetName + " to be true") - waitSubnetU2OStatus(subnetName, subnetClient, true) - checkU2OItems(f, subnet, underlayPod, overlayPod, false) - - ginkgo.By("step10: Disable u2o") - - ginkgo.By("Deleting underlay pod " + u2oPodNameUnderlay) - podClient.DeleteSync(u2oPodNameUnderlay) - - ginkgo.By("Turning off U2OInterconnection of subnet " + subnetName) - subnet = subnetClient.Get(subnetName) - modifiedSubnet = subnet.DeepCopy() - modifiedSubnet.Spec.U2OInterconnection = false - subnetClient.PatchSync(subnet, modifiedSubnet) - - ginkgo.By("Creating underlay pod " + u2oPodNameUnderlay) - underlayPod = podClient.CreateSync(originUnderlayPod) - waitSubnetStatusUpdate(subnetName, subnetClient, 1) - - subnet = subnetClient.Get(subnetName) - ginkgo.By("10. waiting for U2OInterconnection status of subnet " + subnetName + " to be false") - waitSubnetU2OStatus(subnetName, subnetClient, false) - checkU2OItems(f, subnet, underlayPod, overlayPod, false) - }) -}) - -func checkU2OItems(f *framework.Framework, subnet *apiv1.Subnet, underlayPod, overlayPod *corev1.Pod, isU2OCustomVpc bool) { - ginkgo.GinkgoHelper() - - ginkgo.By("checking subnet's u2o interconnect ip of underlay subnet " + subnet.Name) - if subnet.Spec.U2OInterconnection { - framework.ExpectTrue(subnet.Spec.U2OInterconnection) - framework.ExpectIPInCIDR(subnet.Status.U2OInterconnectionIP, subnet.Spec.CIDRBlock) - if !f.VersionPriorTo(1, 11) { - framework.ExpectEqual(subnet.Status.U2OInterconnectionVPC, subnet.Spec.Vpc) - } - if !f.VersionPriorTo(1, 9) { - if subnet.Spec.U2OInterconnectionIP != "" { - framework.ExpectEqual(subnet.Spec.U2OInterconnectionIP, subnet.Status.U2OInterconnectionIP) - } - } - if f.HasIPv4() && subnet.Spec.EnableDHCP { - if !f.VersionPriorTo(1, 12) { - ginkgo.By("checking u2o dhcp gateway ip of underlay subnet " + subnet.Name) - v4Cidr, _ := util.SplitStringIP(subnet.Spec.CIDRBlock) - v4Gateway, _ := util.SplitStringIP(subnet.Status.U2OInterconnectionIP) - nbctlCmd := fmt.Sprintf("ovn-nbctl --bare --columns=options find Dhcp_Options cidr=%s", v4Cidr) - output, _, err := framework.NBExec(nbctlCmd) - framework.ExpectNoError(err) - framework.ExpectContainElement(strings.Fields(string(output)), "router="+v4Gateway) - } - } - } else { - framework.ExpectFalse(subnet.Spec.U2OInterconnection) - framework.ExpectEmpty(subnet.Status.U2OInterconnectionIP) - if !f.VersionPriorTo(1, 11) { - framework.ExpectEmpty(subnet.Status.U2OInterconnectionVPC) - } - if !f.VersionPriorTo(1, 9) { - framework.ExpectEmpty(subnet.Spec.U2OInterconnectionIP) - } - } - - v4gw, v6gw := util.SplitStringIP(subnet.Spec.Gateway) - underlayCidr := strings.Split(subnet.Spec.CIDRBlock, ",") - for _, cidr := range underlayCidr { - var protocolStr, gw string - if util.CheckProtocol(cidr) == apiv1.ProtocolIPv4 { - protocolStr = "ip4" - gw = v4gw - ginkgo.By("checking subnet's using ips of underlay subnet " + subnet.Name + " " + protocolStr) - if subnet.Spec.U2OInterconnection { - framework.ExpectEqual(int(subnet.Status.V4UsingIPs), 2) - } else { - framework.ExpectEqual(int(subnet.Status.V4UsingIPs), 1) - } - } else { - protocolStr = "ip6" - gw = v6gw - ginkgo.By("checking subnet's using ips of underlay subnet " + subnet.Name + " " + protocolStr) - if subnet.Spec.U2OInterconnection { - framework.ExpectEqual(int(subnet.Status.V6UsingIPs), 2) - } else { - framework.ExpectEqual(int(subnet.Status.V6UsingIPs), 1) - } - } - - asName := strings.ReplaceAll(fmt.Sprintf("%s.u2o_exclude_ip.%s", subnet.Name, protocolStr), 
"-", ".") - if !isU2OCustomVpc { - ginkgo.By(fmt.Sprintf("checking underlay subnet's policy1 route %s", protocolStr)) - hitPolicyStr := fmt.Sprintf("%d %s.dst == %s allow", util.U2OSubnetPolicyPriority, protocolStr, cidr) - checkPolicy(hitPolicyStr, subnet.Spec.U2OInterconnection, subnet.Spec.Vpc) - - ginkgo.By(fmt.Sprintf("checking underlay subnet's policy2 route %s", protocolStr)) - hitPolicyStr = fmt.Sprintf("%d %s.dst == $%s && %s.src == %s reroute %s", util.SubnetRouterPolicyPriority, protocolStr, asName, protocolStr, cidr, gw) - checkPolicy(hitPolicyStr, subnet.Spec.U2OInterconnection, subnet.Spec.Vpc) - } - - ginkgo.By(fmt.Sprintf("checking underlay subnet's policy3 route %s", protocolStr)) - hitPolicyStr := fmt.Sprintf("%d %s.src == %s reroute %s", util.GatewayRouterPolicyPriority, protocolStr, cidr, gw) - checkPolicy(hitPolicyStr, subnet.Spec.U2OInterconnection, subnet.Spec.Vpc) - } - - ginkgo.By("checking underlay pod's ip route's nexthop equal the u2o interconnection ip") - routes, err := iproute.RouteShow("", "eth0", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(underlayPod.Namespace, underlayPod.Name, cmd...) - }) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(routes) - - v4InterconnIP, v6InterconnIP := util.SplitStringIP(subnet.Status.U2OInterconnectionIP) - - isV4DefaultRouteExist := false - isV6DefaultRouteExist := false - for _, route := range routes { - if route.Dst == "default" { - if util.CheckProtocol(route.Gateway) == apiv1.ProtocolIPv4 { - if subnet.Spec.U2OInterconnection { - framework.ExpectEqual(route.Gateway, v4InterconnIP) - } else { - framework.ExpectEqual(route.Gateway, v4gw) - } - isV4DefaultRouteExist = true - } else { - if subnet.Spec.U2OInterconnection { - framework.ExpectEqual(route.Gateway, v6InterconnIP) - } else { - framework.ExpectEqual(route.Gateway, v6gw) - } - isV6DefaultRouteExist = true - } - } - } - - switch { - case subnet.Spec.Protocol == apiv1.ProtocolIPv4: - framework.ExpectTrue(isV4DefaultRouteExist) - case subnet.Spec.Protocol == apiv1.ProtocolIPv6: - framework.ExpectTrue(isV6DefaultRouteExist) - case subnet.Spec.Protocol == apiv1.ProtocolDual: - framework.ExpectTrue(isV4DefaultRouteExist) - framework.ExpectTrue(isV6DefaultRouteExist) - } - - UPodIPs := underlayPod.Status.PodIPs - OPodIPs := overlayPod.Status.PodIPs - var v4UPodIP, v4OPodIP, v6UPodIP, v6OPodIP string - for _, UPodIP := range UPodIPs { - if util.CheckProtocol(UPodIP.IP) == apiv1.ProtocolIPv4 { - v4UPodIP = UPodIP.IP - } else { - v6UPodIP = UPodIP.IP - } - } - for _, OPodIP := range OPodIPs { - if util.CheckProtocol(OPodIP.IP) == apiv1.ProtocolIPv4 { - v4OPodIP = OPodIP.IP - } else { - v6OPodIP = OPodIP.IP - } - } - - if v4UPodIP != "" && v4OPodIP != "" { - ginkgo.By("checking underlay pod access to overlay pod v4") - checkReachable(underlayPod.Name, underlayPod.Namespace, v4UPodIP, v4OPodIP, strconv.Itoa(curlListenPort), subnet.Spec.U2OInterconnection) - - ginkgo.By("checking overlay pod access to underlay pod v4") - checkReachable(overlayPod.Name, overlayPod.Namespace, v4OPodIP, v4UPodIP, strconv.Itoa(curlListenPort), subnet.Spec.U2OInterconnection) - } - - if v6UPodIP != "" && v6OPodIP != "" { - ginkgo.By("checking underlay pod access to overlay pod v6") - checkReachable(underlayPod.Name, underlayPod.Namespace, v6UPodIP, v6OPodIP, strconv.Itoa(curlListenPort), subnet.Spec.U2OInterconnection) - - ginkgo.By("checking overlay pod access to underlay pod v6") - checkReachable(overlayPod.Name, overlayPod.Namespace, v6OPodIP, v6UPodIP, 
strconv.Itoa(curlListenPort), subnet.Spec.U2OInterconnection)
-	}
-}
-
-func checkReachable(podName, podNamespace, sourceIP, targetIP, targetPort string, expectReachable bool) {
-	ginkgo.GinkgoHelper()
-
-	ginkgo.By("checking curl reachable")
-	cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(targetIP, targetPort))
-	output, err := e2epodoutput.RunHostCmd(podNamespace, podName, cmd)
-	if expectReachable {
-		framework.ExpectNoError(err)
-		client, _, err := net.SplitHostPort(strings.TrimSpace(output))
-		framework.ExpectNoError(err)
-		// check that the packet was not SNATed
-		framework.ExpectEqual(sourceIP, client)
-	} else {
-		framework.ExpectError(err)
-	}
-}
-
-func checkPolicy(hitPolicyStr string, expectPolicyExist bool, vpcName string) {
-	ginkgo.GinkgoHelper()
-
-	framework.WaitUntil(time.Second, 10*time.Second, func(_ context.Context) (bool, error) {
-		cmd := "ovn-nbctl lr-policy-list " + vpcName
-		output, _, err := framework.NBExec(cmd)
-		if err != nil {
-			return false, err
-		}
-		outputStr := string(output)
-		for _, line := range strings.Split(outputStr, "\n") {
-			if strings.Contains(strings.Join(strings.Fields(line), " "), hitPolicyStr) == expectPolicyExist {
-				return true, nil
-			}
-		}
-		return false, nil
-	}, "")
-}
diff --git a/test/e2e/kubevirt/e2e_test.go b/test/e2e/kubevirt/e2e_test.go
deleted file mode 100644
index d951a17a585..00000000000
--- a/test/e2e/kubevirt/e2e_test.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package kubevirt
-
-import (
-	"context"
-	"flag"
-	"fmt"
-	"strings"
-	"testing"
-	"time"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/test/e2e"
-	k8sframework "k8s.io/kubernetes/test/e2e/framework"
-	"k8s.io/kubernetes/test/e2e/framework/config"
-	v1 "kubevirt.io/api/core/v1"
-
-	"github.com/onsi/ginkgo/v2"
-
-	"github.com/kubeovn/kube-ovn/pkg/ovs"
-	"github.com/kubeovn/kube-ovn/pkg/util"
-	"github.com/kubeovn/kube-ovn/test/e2e/framework"
-)
-
-const image = "quay.io/kubevirt/cirros-container-disk-demo:latest"
-
-func init() {
-	klog.SetOutput(ginkgo.GinkgoWriter)
-
-	// Register flags.
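// [Editor's note, not a line of the deleted file] The checkReachable helper in
// the underlay suite above leans on agnhost netexec's /clientip endpoint, which
// echoes the caller's "ip:port"; comparing that echo against the client pod's
// own address is how the suite asserts that traffic was not SNATed. A minimal
// standalone sketch of the same probe (the helper name clientIPSeenBy and the
// io/net/net-http/strings imports are assumptions, not framework API):
//
//	func clientIPSeenBy(target string) (string, error) {
//		// target is a pre-joined "host:port" pointing at a netexec server
//		resp, err := http.Get("http://" + target + "/clientip")
//		if err != nil {
//			return "", err
//		}
//		defer resp.Body.Close()
//		body, err := io.ReadAll(resp.Body) // body is "ip:port" as seen by the server
//		if err != nil {
//			return "", err
//		}
//		host, _, err := net.SplitHostPort(strings.TrimSpace(string(body)))
//		return host, err
//	}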
- config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} - -var _ = framework.Describe("[group:kubevirt]", func() { - f := framework.NewDefaultFramework("kubevirt") - - var vmName, subnetName, namespaceName string - var subnetClient *framework.SubnetClient - var podClient *framework.PodClient - var vmClient *framework.VMClient - var ipClient *framework.IPClient - ginkgo.BeforeEach(func() { - f.SkipVersionPriorTo(1, 12, "This feature was introduced in v1.12.") - - namespaceName = f.Namespace.Name - vmName = "vm-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - subnetClient = f.SubnetClient() - podClient = f.PodClientNS(namespaceName) - vmClient = f.VMClientNS(namespaceName) - ipClient = f.IPClient() - - ginkgo.By("Creating vm " + vmName) - vm := framework.MakeVM(vmName, image, "small", true) - _ = vmClient.CreateSync(vm) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting vm " + vmName) - vmClient.DeleteSync(vmName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("should be able to keep pod ips after vm pod is deleted", func() { - ginkgo.By("Getting pod of vm " + vmName) - labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName) - podList, err := podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating pod annotations") - pod := &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName) - ips := pod.Status.PodIPs - - ginkgo.By("Deleting pod " + pod.Name) - podClient.DeleteSync(pod.Name) - - ginkgo.By("Waiting for vm " + vmName + " to be ready") - err = vmClient.WaitToBeReady(vmName, 2*time.Minute) - framework.ExpectNoError(err) - - ginkgo.By("Getting pod of vm " + vmName) - podList, err = podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating new pod annotations") - pod = &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName) - - ginkgo.By("Checking whether pod ips are changed") - framework.ExpectEqual(ips, pod.Status.PodIPs) - }) - - framework.ConformanceIt("should be able to keep pod ips after the vm is restarted", func() { - ginkgo.By("Getting pod of vm " + vmName) - labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName) - podList, err := podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating pod annotations") - pod := &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - 
framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName) - ips := pod.Status.PodIPs - - ginkgo.By("Stopping vm " + vmName) - vmClient.StopSync(vmName) - - portName := ovs.PodNameToPortName(vmName, namespaceName, util.OvnProvider) - ginkgo.By("Check ip resource " + portName) - // the ip should exist after vm is stopped - oldVMIP := ipClient.Get(portName) - framework.ExpectNil(oldVMIP.DeletionTimestamp) - ginkgo.By("Starting vm " + vmName) - vmClient.StartSync(vmName) - - // new ip name is the same as the old one - ginkgo.By("Check ip resource " + portName) - newVMIP := ipClient.Get(portName) - framework.ExpectEqual(oldVMIP.Spec, newVMIP.Spec) - - ginkgo.By("Getting pod of vm " + vmName) - podList, err = podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating new pod annotations") - pod = &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName) - - ginkgo.By("Checking whether pod ips are changed") - framework.ExpectEqual(ips, pod.Status.PodIPs) - }) - - framework.ConformanceIt("should be able to handle vm restart when subnet changes before the vm is stopped", func() { - // create a vm within a namespace, the namespace has no subnet, so the vm use ovn-default subnet - // create a subnet in the namespace later, the vm should use its own subnet - // stop the vm, the vm should delete the vm ip, because of the namespace only has one subnet but not ovn-default - // start the vm, the vm should use the namespace owned subnet - ginkgo.By("Creating subnet " + subnetName) - cidr := framework.RandomCIDR(f.ClusterIPFamily) - subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - _ = subnetClient.CreateSync(subnet) - - ginkgo.By("Getting pod of vm " + vmName) - labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName) - podList, err := podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating pod annotations") - pod := &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName) - ips := pod.Status.PodIPs - - ginkgo.By("Stopping vm " + vmName) - vmClient.StopSync(vmName) - - // the ip is deleted - portName := ovs.PodNameToPortName(vmName, namespaceName, util.OvnProvider) - err = ipClient.WaitToDisappear(portName, 2*time.Second, 2*time.Minute) - framework.ExpectNoError(err) - - ginkgo.By("Starting vm " + vmName) - vmClient.StartSync(vmName) - - ginkgo.By("Getting pod of vm " + vmName) - podList, err = podClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: labelSelector, - }) - framework.ExpectNoError(err) - framework.ExpectHaveLen(podList.Items, 1) - - ginkgo.By("Validating new pod annotations") - pod = &podList.Items[0] - framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true") - framework.ExpectHaveKeyWithValue(pod.Annotations, 
util.VMAnnotation, vmName)
-
- ginkgo.By("Checking whether pod ips are changed")
- framework.ExpectNotEqual(ips, pod.Status.PodIPs)
-
- ginkgo.By("Checking external-ids of LSP " + portName)
- cmd := "ovn-nbctl --format=list --data=bare --no-heading --columns=external_ids list Logical_Switch_Port " + portName
- output, _, err := framework.NBExec(cmd)
- framework.ExpectNoError(err)
- framework.ExpectContainElement(strings.Fields(string(output)), "ls="+subnetName)
- })
-
- framework.ConformanceIt("should be able to handle vm restart when subnet changes after the vm is stopped", func() {
- ginkgo.By("Getting pod of vm " + vmName)
- labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName)
- podList, err := podClient.List(context.TODO(), metav1.ListOptions{
- LabelSelector: labelSelector,
- })
- framework.ExpectNoError(err)
- framework.ExpectHaveLen(podList.Items, 1)
-
- ginkgo.By("Validating pod annotations")
- pod := &podList.Items[0]
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
- ips := pod.Status.PodIPs
-
- ginkgo.By("Stopping vm " + vmName)
- vmClient.StopSync(vmName)
-
- ginkgo.By("Creating subnet " + subnetName)
- cidr := framework.RandomCIDR(f.ClusterIPFamily)
- subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName})
- _ = subnetClient.CreateSync(subnet)
-
- ginkgo.By("Starting vm " + vmName)
- vmClient.StartSync(vmName)
-
- ginkgo.By("Getting pod of vm " + vmName)
- podList, err = podClient.List(context.TODO(), metav1.ListOptions{
- LabelSelector: labelSelector,
- })
- framework.ExpectNoError(err)
- framework.ExpectHaveLen(podList.Items, 1)
-
- ginkgo.By("Validating new pod annotations")
- pod = &podList.Items[0]
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
-
- ginkgo.By("Checking whether pod ips are changed")
- framework.ExpectNotEqual(ips, pod.Status.PodIPs)
-
- portName := ovs.PodNameToPortName(vmName, namespaceName, util.OvnProvider)
- ginkgo.By("Checking external-ids of LSP " + portName)
- cmd := "ovn-nbctl --format=list --data=bare --no-heading --columns=external_ids list Logical_Switch_Port " + portName
- output, _, err := framework.NBExec(cmd)
- framework.ExpectNoError(err)
- framework.ExpectContainElement(strings.Fields(string(output)), "ls="+subnetName)
- })
-
- framework.ConformanceIt("restart vm should be able to change vm subnet after deleting the old ip", func() {
- // case: change the vm subnet after stopping the vm and deleting its old ip
- // stop the vm, then delete the ip
- // create a new subnet in the namespace
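- // start the vm; with the old ip gone, the address should be allocated from the new subnet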
- // make sure the ip changed after the vm started
- ginkgo.By("Getting pod of vm " + vmName)
- labelSelector := fmt.Sprintf("%s=%s", v1.VirtualMachineNameLabel, vmName)
- podList, err := podClient.List(context.TODO(), metav1.ListOptions{
- LabelSelector: labelSelector,
- })
- framework.ExpectNoError(err)
- framework.ExpectHaveLen(podList.Items, 1)
-
- ginkgo.By("Validating pod annotations")
- pod := &podList.Items[0]
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
- ginkgo.By("Stopping vm " + vmName)
- vmClient.StopSync(vmName)
-
- // make sure the vm ip still exists
- portName := ovs.PodNameToPortName(vmName, namespaceName, util.OvnProvider)
- oldVMIP := ipClient.Get(portName)
- framework.ExpectNotEmpty(oldVMIP.Spec.IPAddress)
- ipClient.DeleteSync(portName)
- // delete the old ip so an ip with the same name can be created in the other subnet
-
- ginkgo.By("Creating subnet " + subnetName)
- cidr := framework.RandomCIDR(f.ClusterIPFamily)
- subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName})
- subnet = subnetClient.CreateSync(subnet)
- ginkgo.By("Updating vm " + vmName + " to use new subnet " + subnet.Name)
-
- // the vm should use the new subnet in the namespace
- ginkgo.By("Starting vm " + vmName)
- vmClient.StartSync(vmName)
- // the new ip has the same name as the old one
- newVMIP := ipClient.Get(portName)
- framework.ExpectNotEmpty(newVMIP.Spec.IPAddress)
-
- ginkgo.By("Getting pod of vm " + vmName)
- podList, err = podClient.List(context.TODO(), metav1.ListOptions{
- LabelSelector: labelSelector,
- })
- framework.ExpectNoError(err)
- framework.ExpectHaveLen(podList.Items, 1)
-
- ginkgo.By("Validating new pod annotations")
- pod = &podList.Items[0]
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.AllocatedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.RoutedAnnotation, "true")
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.VMAnnotation, vmName)
- framework.ExpectHaveKeyWithValue(pod.Annotations, util.LogicalSwitchAnnotation, subnetName)
-
- ginkgo.By("Checking whether pod ips are changed")
- framework.ExpectNotEqual(newVMIP.Spec.IPAddress, oldVMIP.Spec.IPAddress)
-
- ginkgo.By("Checking external-ids of LSP " + portName)
- cmd := "ovn-nbctl --format=list --data=bare --no-heading --columns=external_ids list Logical_Switch_Port " + portName
- output, _, err := framework.NBExec(cmd)
- framework.ExpectNoError(err)
- framework.ExpectContainElement(strings.Fields(string(output)), "ls="+subnetName)
- })
-})
diff --git a/test/e2e/lb-svc/e2e_test.go b/test/e2e/lb-svc/e2e_test.go
deleted file mode 100644
index d62fe90ae0a..00000000000
--- a/test/e2e/lb-svc/e2e_test.go
+++ /dev/null
@@ -1,361 +0,0 @@
-package lb_svc
-
-import (
- "context"
- "flag"
- "fmt"
- "math/big"
- "math/rand/v2"
- "net"
- "strconv"
- "testing"
- "time"
-
- dockernetwork "github.com/docker/docker/api/types/network"
- nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
- corev1 "k8s.io/api/core/v1"
- k8serrors "k8s.io/apimachinery/pkg/api/errors"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/util/intstr"
- clientset "k8s.io/client-go/kubernetes"
- "k8s.io/klog/v2"
- "k8s.io/kubernetes/test/e2e"
- k8sframework "k8s.io/kubernetes/test/e2e/framework"
-
"k8s.io/kubernetes/test/e2e/framework/config" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - "k8s.io/utils/ptr" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} - -func lbSvcDeploymentName(serviceName string) string { - return "lb-svc-" + serviceName -} - -var _ = framework.SerialDescribe("[group:lb-svc]", func() { - f := framework.NewDefaultFramework("lb-svc") - - var skip bool - var cs clientset.Interface - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var serviceClient *framework.ServiceClient - var deploymentClient *framework.DeploymentClient - var nadClient *framework.NetworkAttachmentDefinitionClient - var provider, nadName, clusterName, subnetName, namespaceName, serviceName, deploymentName, serverPodName, clientPodName string - var dockerNetwork *dockernetwork.Inspect - var cidr, gateway string - ginkgo.BeforeEach(func() { - cs = f.ClientSet - podClient = f.PodClient() - subnetClient = f.SubnetClient() - serviceClient = f.ServiceClient() - deploymentClient = f.DeploymentClient() - nadClient = f.NetworkAttachmentDefinitionClient() - namespaceName = f.Namespace.Name - nadName = "nad-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - serviceName = "service-" + framework.RandomSuffix() - serverPodName = "pod-" + framework.RandomSuffix() - clientPodName = "pod-" + framework.RandomSuffix() - deploymentName = lbSvcDeploymentName(serviceName) - - if skip { - ginkgo.Skip("lb svc spec only runs on kind clusters") - } - - if clusterName == "" { - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) - if !ok { - skip = true - ginkgo.Skip("lb svc spec only runs on kind clusters") - } - clusterName = cluster - } - - if dockerNetwork == nil { - ginkgo.By("Getting docker network " + kind.NetworkName) - network, err := docker.NetworkInspect(kind.NetworkName) - framework.ExpectNoError(err, "getting docker network "+kind.NetworkName) - dockerNetwork = network - } - - provider = fmt.Sprintf("%s.%s", nadName, namespaceName) - - ginkgo.By("Creating network attachment definition " + nadName) - nad := framework.MakeMacvlanNetworkAttachmentDefinition(nadName, namespaceName, "eth0", "bridge", provider, nil) - nad = nadClient.Create(nad) - framework.Logf("created network attachment definition config:\n%s", nad.Spec.Config) - - ginkgo.By("Creating subnet " + subnetName) - for _, config := range dockerNetwork.IPAM.Config { - if util.CheckProtocol(config.Subnet) == apiv1.ProtocolIPv4 { - cidr = config.Subnet - gateway = config.Gateway - break - } - } - excludeIPs := make([]string, 0, len(dockerNetwork.Containers)) - for _, container := range dockerNetwork.Containers { - if container.IPv4Address != "" { - ip, _, _ := 
net.ParseCIDR(container.IPv4Address) - excludeIPs = append(excludeIPs, ip.String()) - } - } - subnet := framework.MakeSubnet(subnetName, "", cidr, gateway, "", "", excludeIPs, nil, []string{namespaceName}) - subnet.Spec.Provider = provider - _ = subnetClient.Create(subnet) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + clientPodName) - podClient.DeleteSync(clientPodName) - - ginkgo.By("Deleting pod " + serverPodName) - podClient.DeleteSync(serverPodName) - - ginkgo.By("Deleting service " + serviceName) - serviceClient.DeleteSync(serviceName) - - ginkgo.By("Deleting deployment " + deploymentName) - deploymentClient.DeleteSync(deploymentName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - - ginkgo.By("Deleting network attachment definition " + nadName) - nadClient.Delete(nadName) - }) - - framework.ConformanceIt("should allocate dynamic external IP for service", func() { - ginkgo.By("Creating server pod " + serverPodName) - labels := map[string]string{"app": serviceName} - port := 8000 + rand.Int32N(1000) - args := []string{"netexec", "--http-port", strconv.Itoa(int(port))} - serverPod := framework.MakePod(namespaceName, serverPodName, labels, nil, framework.AgnhostImage, nil, args) - _ = podClient.CreateSync(serverPod) - - ginkgo.By("Creating service " + serviceName) - ports := []corev1.ServicePort{{ - Name: "tcp", - Protocol: corev1.ProtocolTCP, - Port: port, - TargetPort: intstr.FromInt32(port), - }} - annotations := map[string]string{ - util.AttachmentProvider: provider, - } - service := framework.MakeService(serviceName, corev1.ServiceTypeLoadBalancer, annotations, labels, ports, corev1.ServiceAffinityNone) - service.Spec.AllocateLoadBalancerNodePorts = ptr.To(false) - service = serviceClient.CreateSync(service, func(s *corev1.Service) (bool, error) { - return len(s.Spec.ClusterIPs) != 0, nil - }, "cluster ips are not empty") - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(ctx context.Context) (bool, error) { - _, err := deploymentClient.DeploymentInterface.Get(ctx, deploymentName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - ginkgo.By("deployment " + deploymentName + " still not ready") - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("deployment %s is created", deploymentName)) - deployment := deploymentClient.Get(deploymentName) - err := deploymentClient.WaitToComplete(deployment) - framework.ExpectNoError(err, "deployment failed to complete") - - ginkgo.By("Getting pods for deployment " + deploymentName) - pods, err := deploymentClient.GetPods(deployment) - framework.ExpectNoError(err) - framework.ExpectHaveLen(pods.Items, 1) - - ginkgo.By("Checking LB pod annotations") - pod := &pods.Items[0] - key := fmt.Sprintf(util.AllocatedAnnotationTemplate, provider) - framework.ExpectHaveKeyWithValue(pod.Annotations, key, "true") - cidrKey := fmt.Sprintf(util.CidrAnnotationTemplate, provider) - ipKey := fmt.Sprintf(util.IPAddressAnnotationTemplate, provider) - framework.ExpectHaveKey(pod.Annotations, cidrKey) - framework.ExpectHaveKey(pod.Annotations, ipKey) - lbIP := pod.Annotations[ipKey] - framework.ExpectIPInCIDR(lbIP, pod.Annotations[cidrKey]) - - ginkgo.By("Checking service status") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - service = serviceClient.Get(serviceName) - return len(service.Status.LoadBalancer.Ingress) != 0, nil - }, 
".status.loadBalancer.ingress is not empty") - framework.ExpectHaveLen(service.Status.LoadBalancer.Ingress, 1) - framework.ExpectEqual(service.Status.LoadBalancer.Ingress[0].IP, lbIP) - - ginkgo.By("Creating client pod " + clientPodName) - annotations = map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", namespaceName, nadName)} - cmd := []string{"sh", "-c", "sleep infinity"} - clientPod := framework.MakePod(namespaceName, clientPodName, nil, annotations, f.KubeOVNImage, cmd, nil) - clientPod = podClient.CreateSync(clientPod) - - ginkgo.By("Checking service connectivity from client pod " + clientPodName) - curlCmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", util.JoinHostPort(lbIP, port)) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, curlCmd, clientPod.Namespace, clientPod.Name)) - _ = e2epodoutput.RunHostCmdOrDie(clientPod.Namespace, clientPod.Name, curlCmd) - - if f.VersionPriorTo(1, 13) { - return - } - - ginkgo.By("Deleting lb svc pod " + pod.Name) - podClient.DeleteSync(pod.Name) - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be ready") - err = deploymentClient.WaitToComplete(deployment) - framework.ExpectNoError(err, "deployment failed to complete") - - ginkgo.By("Getting pods for deployment " + deploymentName) - pods, err = deploymentClient.GetPods(deployment) - framework.ExpectNoError(err) - framework.ExpectHaveLen(pods.Items, 1) - lbIP = pods.Items[0].Annotations[ipKey] - - ginkgo.By("Checking service connectivity from client pod " + clientPodName) - curlCmd = fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", util.JoinHostPort(lbIP, port)) - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, curlCmd, clientPod.Namespace, clientPod.Name)) - _, err = e2epodoutput.RunHostCmd(clientPod.Namespace, clientPod.Name, curlCmd) - return err == nil, nil - }, "") - - ginkgo.By("Deleting service " + serviceName) - serviceClient.DeleteSync(serviceName) - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be deleted automatically") - err = deploymentClient.WaitToDisappear(deploymentName, 2*time.Second, 2*time.Minute) - framework.ExpectNoError(err, "deployment failed to disappear") - }) - - framework.ConformanceIt("should allocate static external IP for service", func() { - ginkgo.By("Creating server pod " + serverPodName) - labels := map[string]string{"app": serviceName} - port := 8000 + rand.Int32N(1000) - args := []string{"netexec", "--http-port", strconv.Itoa(int(port))} - serverPod := framework.MakePod(namespaceName, serverPodName, labels, nil, framework.AgnhostImage, nil, args) - _ = podClient.CreateSync(serverPod) - - ginkgo.By("Creating service " + serviceName) - ports := []corev1.ServicePort{{ - Name: "tcp", - Protocol: corev1.ProtocolTCP, - Port: port, - TargetPort: intstr.FromInt32(port), - }} - annotations := map[string]string{ - util.AttachmentProvider: provider, - } - base := util.IP2BigInt(gateway) - lbIP := util.BigInt2Ip(base.Add(base, big.NewInt(50+rand.Int64N(50)))) - service := framework.MakeService(serviceName, corev1.ServiceTypeLoadBalancer, annotations, labels, ports, corev1.ServiceAffinityNone) - service.Spec.LoadBalancerIP = lbIP - service.Spec.AllocateLoadBalancerNodePorts = ptr.To(false) - _ = serviceClient.Create(service) - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be ready") - framework.WaitUntil(2*time.Second, time.Minute, func(ctx context.Context) 
(bool, error) { - _, err := deploymentClient.DeploymentInterface.Get(ctx, deploymentName, metav1.GetOptions{}) - if err == nil { - return true, nil - } - if k8serrors.IsNotFound(err) { - return false, nil - } - return false, err - }, fmt.Sprintf("deployment %s is created", deploymentName)) - deployment := deploymentClient.Get(deploymentName) - err := deploymentClient.WaitToComplete(deployment) - framework.ExpectNoError(err, "deployment failed to complete") - - ginkgo.By("Getting pods for deployment " + deploymentName) - pods, err := deploymentClient.GetPods(deployment) - framework.ExpectNoError(err) - framework.ExpectHaveLen(pods.Items, 1) - - ginkgo.By("Checking LB pod annotations") - pod := &pods.Items[0] - key := fmt.Sprintf(util.AllocatedAnnotationTemplate, provider) - framework.ExpectHaveKeyWithValue(pod.Annotations, key, "true") - ipKey := fmt.Sprintf(util.IPAddressAnnotationTemplate, provider) - framework.ExpectHaveKeyWithValue(pod.Annotations, ipKey, lbIP) - cidrKey := fmt.Sprintf(util.CidrAnnotationTemplate, provider) - framework.ExpectHaveKey(pod.Annotations, cidrKey) - framework.ExpectIPInCIDR(lbIP, pod.Annotations[cidrKey]) - - ginkgo.By("Checking service status") - framework.WaitUntil(2*time.Second, time.Minute, func(_ context.Context) (bool, error) { - service = serviceClient.Get(serviceName) - return len(service.Status.LoadBalancer.Ingress) != 0, nil - }, ".status.loadBalancer.ingress is not empty") - framework.ExpectHaveLen(service.Status.LoadBalancer.Ingress, 1) - framework.ExpectEqual(service.Status.LoadBalancer.Ingress[0].IP, lbIP) - - ginkgo.By("Creating client pod " + clientPodName) - annotations = map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", namespaceName, nadName)} - cmd := []string{"sh", "-c", "sleep infinity"} - clientPod := framework.MakePod(namespaceName, clientPodName, nil, annotations, f.KubeOVNImage, cmd, nil) - clientPod = podClient.CreateSync(clientPod) - - ginkgo.By("Checking service connectivity from client pod " + clientPodName) - curlCmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", util.JoinHostPort(lbIP, port)) - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, curlCmd, clientPod.Namespace, clientPod.Name)) - _ = e2epodoutput.RunHostCmdOrDie(clientPod.Namespace, clientPod.Name, curlCmd) - - if f.VersionPriorTo(1, 13) { - return - } - - ginkgo.By("Deleting lb svc pod " + pod.Name) - podClient.DeleteSync(pod.Name) - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be ready") - err = deploymentClient.WaitToComplete(deployment) - framework.ExpectNoError(err, "deployment failed to complete") - - ginkgo.By("Checking service connectivity from client pod " + clientPodName) - framework.WaitUntil(2*time.Second, 30*time.Second, func(_ context.Context) (bool, error) { - ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, curlCmd, clientPod.Namespace, clientPod.Name)) - _, err = e2epodoutput.RunHostCmd(clientPod.Namespace, clientPod.Name, curlCmd) - return err == nil, nil - }, "") - - ginkgo.By("Deleting service " + serviceName) - serviceClient.DeleteSync(serviceName) - - ginkgo.By("Waiting for LB deployment " + deploymentName + " to be deleted automatically") - err = deploymentClient.WaitToDisappear(deploymentName, 2*time.Second, 2*time.Minute) - framework.ExpectNoError(err, "deployment failed to disappear") - }) -}) diff --git a/test/e2e/multus/e2e_test.go b/test/e2e/multus/e2e_test.go deleted file mode 100644 index 1da36113a39..00000000000 --- a/test/e2e/multus/e2e_test.go +++ /dev/null @@ 
-1,434 +0,0 @@ -package multus - -import ( - "flag" - "fmt" - "testing" - - nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/ovs" - "github.com/kubeovn/kube-ovn/pkg/request" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} - -var _ = framework.SerialDescribe("[group:multus]", func() { - f := framework.NewDefaultFramework("multus") - - var ipClient *framework.IPClient - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var nadClient *framework.NetworkAttachmentDefinitionClient - var nadName, podName, subnetName, namespaceName, cidr string - var subnet *apiv1.Subnet - ginkgo.BeforeEach(func() { - namespaceName = f.Namespace.Name - nadName = "nad-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - ipClient = f.IPClient() - podClient = f.PodClient() - subnetClient = f.SubnetClient() - nadClient = f.NetworkAttachmentDefinitionClient() - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - - ginkgo.By("Deleting network attachment definition " + nadName) - nadClient.Delete(nadName) - }) - - framework.ConformanceIt("should be able to create attachment interface", func() { - provider := fmt.Sprintf("%s.%s.%s", nadName, namespaceName, util.OvnProvider) - - ginkgo.By("Creating network attachment definition " + nadName) - nad := framework.MakeOVNNetworkAttachmentDefinition(nadName, namespaceName, provider, nil) - nad = nadClient.Create(nad) - framework.Logf("created network attachment definition config:\n%s", nad.Spec.Config) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.Provider = provider - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", nad.Namespace, nad.Name)} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKey(pod.Annotations, nadv1.NetworkStatusAnnot) - framework.Logf("pod network status:\n%s", pod.Annotations[nadv1.NetworkStatusAnnot]) - cidr := pod.Annotations[fmt.Sprintf(util.CidrAnnotationTemplate, provider)] - ip := pod.Annotations[fmt.Sprintf(util.IPAddressAnnotationTemplate, provider)] - gateway := pod.Annotations[fmt.Sprintf(util.GatewayAnnotationTemplate, provider)] - mac := 
pod.Annotations[fmt.Sprintf(util.MacAddressAnnotationTemplate, provider)] - framework.ExpectIPInCIDR(ip, cidr) - framework.ExpectIPInCIDR(gateway, cidr) - framework.ExpectMAC(mac) - - ipName := ovs.PodNameToPortName(podName, namespaceName, provider) - ginkgo.By("Validating IP resource " + ipName) - ipCR := ipClient.Get(ipName) - framework.ExpectEqual(ipCR.Spec.Subnet, subnetName) - framework.ExpectEqual(ipCR.Spec.PodName, podName) - framework.ExpectEqual(ipCR.Spec.Namespace, namespaceName) - framework.ExpectEqual(ipCR.Spec.NodeName, pod.Spec.NodeName) - framework.ExpectEqual(ipCR.Spec.IPAddress, ip) - framework.ExpectEqual(ipCR.Spec.MacAddress, mac) - ipv4, ipv6 := util.SplitStringIP(ip) - framework.ExpectEqual(ipCR.Spec.V4IPAddress, ipv4) - framework.ExpectEqual(ipCR.Spec.V6IPAddress, ipv6) - framework.ExpectHaveKeyWithValue(ipCR.Labels, subnetName, "") - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.SubnetNameLabel, subnetName) - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.NodeNameLabel, pod.Spec.NodeName) - if !f.VersionPriorTo(1, 13) { - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.IPReservedLabel, "false") - } - - ginkgo.By("Retrieving pod routes") - podRoutes, err := iproute.RouteShow("", "", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) - }) - framework.ExpectNoError(err) - - ginkgo.By("Validating pod routes") - actualRoutes := make([]request.Route, 0, len(podRoutes)) - for _, r := range podRoutes { - if r.Gateway != "" || r.Dst != "" { - actualRoutes = append(actualRoutes, request.Route{Destination: r.Dst, Gateway: r.Gateway}) - } - } - ipv4CIDR, ipv6CIDR := util.SplitStringIP(pod.Annotations[util.CidrAnnotation]) - ipv4Gateway, ipv6Gateway := util.SplitStringIP(pod.Annotations[util.GatewayAnnotation]) - nadIPv4CIDR, nadIPv6CIDR := util.SplitStringIP(subnet.Spec.CIDRBlock) - nadIPv4Gateway, nadIPv6Gateway := util.SplitStringIP(subnet.Spec.Gateway) - if f.HasIPv4() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv4CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv4Gateway}) - } - if f.HasIPv6() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv6CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv6Gateway}) - } - }) - - framework.ConformanceIt("should be able to create attachment interface with custom routes", func() { - provider := fmt.Sprintf("%s.%s.%s", nadName, namespaceName, util.OvnProvider) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.Provider = provider - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Constructing network attachment definition config") - var routeDst string - for i := 0; i < 3; i++ { - routeDst = framework.RandomCIDR(f.ClusterIPFamily) - if routeDst != subnet.Spec.CIDRBlock { - break - } - } - framework.ExpectNotEqual(routeDst, subnet.Spec.CIDRBlock) - routeGw := framework.RandomIPs(subnet.Spec.CIDRBlock, "", 1) - 
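- // dual-stack subnet fields hold comma-separated IPv4/IPv6 values, so split them and build one default-gateway route plus one custom route per enabled ip family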
nadIPv4Gateway, nadIPv6Gateway := util.SplitStringIP(subnet.Spec.Gateway) - ipv4RouteDst, ipv6RouteDst := util.SplitStringIP(routeDst) - ipv4RouteGw, ipv6RouteGw := util.SplitStringIP(routeGw) - routes := make([]request.Route, 0, 4) - if f.HasIPv4() { - routes = append(routes, request.Route{Gateway: nadIPv4Gateway}) - routes = append(routes, request.Route{Destination: ipv4RouteDst, Gateway: ipv4RouteGw}) - } - if f.HasIPv6() { - routes = append(routes, request.Route{Gateway: nadIPv6Gateway}) - routes = append(routes, request.Route{Destination: ipv6RouteDst, Gateway: ipv6RouteGw}) - } - - ginkgo.By("Creating network attachment definition " + nadName) - nad := framework.MakeOVNNetworkAttachmentDefinition(nadName, namespaceName, provider, routes) - nad = nadClient.Create(nad) - framework.Logf("created network attachment definition config:\n%s", nad.Spec.Config) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", nad.Namespace, nad.Name)} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKey(pod.Annotations, nadv1.NetworkStatusAnnot) - framework.Logf("pod network status:\n%s", pod.Annotations[nadv1.NetworkStatusAnnot]) - cidr := pod.Annotations[fmt.Sprintf(util.CidrAnnotationTemplate, provider)] - ip := pod.Annotations[fmt.Sprintf(util.IPAddressAnnotationTemplate, provider)] - gateway := pod.Annotations[fmt.Sprintf(util.GatewayAnnotationTemplate, provider)] - mac := pod.Annotations[fmt.Sprintf(util.MacAddressAnnotationTemplate, provider)] - framework.ExpectIPInCIDR(ip, cidr) - framework.ExpectIPInCIDR(gateway, cidr) - framework.ExpectMAC(mac) - - ipName := ovs.PodNameToPortName(podName, namespaceName, provider) - ginkgo.By("Validating IP resource " + ipName) - ipCR := ipClient.Get(ipName) - framework.ExpectEqual(ipCR.Spec.Subnet, subnetName) - framework.ExpectEqual(ipCR.Spec.PodName, podName) - framework.ExpectEqual(ipCR.Spec.Namespace, namespaceName) - framework.ExpectEqual(ipCR.Spec.NodeName, pod.Spec.NodeName) - framework.ExpectEqual(ipCR.Spec.IPAddress, ip) - framework.ExpectEqual(ipCR.Spec.MacAddress, mac) - ipv4, ipv6 := util.SplitStringIP(ip) - framework.ExpectEqual(ipCR.Spec.V4IPAddress, ipv4) - framework.ExpectEqual(ipCR.Spec.V6IPAddress, ipv6) - framework.ExpectHaveKeyWithValue(ipCR.Labels, subnetName, "") - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.SubnetNameLabel, subnetName) - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.NodeNameLabel, pod.Spec.NodeName) - if !f.VersionPriorTo(1, 13) { - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.IPReservedLabel, "false") - } - - ginkgo.By("Retrieving pod routes") - podRoutes, err := iproute.RouteShow("", "", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) 
- }) - framework.ExpectNoError(err) - - ginkgo.By("Validating pod routes") - actualRoutes := make([]request.Route, 0, len(podRoutes)) - for _, r := range podRoutes { - if r.Gateway != "" || r.Dst != "" { - actualRoutes = append(actualRoutes, request.Route{Destination: r.Dst, Gateway: r.Gateway}) - } - } - ipv4CIDR, ipv6CIDR := util.SplitStringIP(pod.Annotations[util.CidrAnnotation]) - ipv4Gateway, ipv6Gateway := util.SplitStringIP(pod.Annotations[util.GatewayAnnotation]) - nadIPv4CIDR, nadIPv6CIDR := util.SplitStringIP(subnet.Spec.CIDRBlock) - if f.HasIPv4() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv4CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4RouteDst, Gateway: ipv4RouteGw}) - } - if f.HasIPv6() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv6CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6RouteDst, Gateway: ipv6RouteGw}) - } - }) - - framework.ConformanceIt("should be able to provide IPAM for macvlan", func() { - provider := fmt.Sprintf("%s.%s", nadName, namespaceName) - - ginkgo.By("Creating network attachment definition " + nadName) - nad := framework.MakeMacvlanNetworkAttachmentDefinition(nadName, namespaceName, "eth0", "bridge", provider, nil) - nad = nadClient.Create(nad) - framework.Logf("created network attachment definition config:\n%s", nad.Spec.Config) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.Provider = provider - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", nad.Namespace, nad.Name)} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKey(pod.Annotations, nadv1.NetworkStatusAnnot) - framework.Logf("pod network status:\n%s", pod.Annotations[nadv1.NetworkStatusAnnot]) - cidr := pod.Annotations[fmt.Sprintf(util.CidrAnnotationTemplate, provider)] - ip := pod.Annotations[fmt.Sprintf(util.IPAddressAnnotationTemplate, provider)] - gateway := pod.Annotations[fmt.Sprintf(util.GatewayAnnotationTemplate, provider)] - framework.ExpectIPInCIDR(ip, cidr) - framework.ExpectIPInCIDR(gateway, cidr) - framework.ExpectNotHaveKey(pod.Annotations, fmt.Sprintf(util.MacAddressAnnotationTemplate, provider)) - - ipName := ovs.PodNameToPortName(podName, namespaceName, provider) - ginkgo.By("Validating IP resource " + ipName) - ipCR := ipClient.Get(ipName) - framework.ExpectEqual(ipCR.Spec.Subnet, subnetName) - framework.ExpectEqual(ipCR.Spec.PodName, podName) - framework.ExpectEqual(ipCR.Spec.Namespace, namespaceName) - 
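- // for macvlan attachments kube-ovn only handles IPAM and the macvlan driver picks the mac, so the ip resource is expected to carry no mac address (checked below)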
framework.ExpectEqual(ipCR.Spec.NodeName, pod.Spec.NodeName) - framework.ExpectEqual(ipCR.Spec.IPAddress, ip) - framework.ExpectEmpty(ipCR.Spec.MacAddress) - ipv4, ipv6 := util.SplitStringIP(ip) - framework.ExpectEqual(ipCR.Spec.V4IPAddress, ipv4) - framework.ExpectEqual(ipCR.Spec.V6IPAddress, ipv6) - framework.ExpectHaveKeyWithValue(ipCR.Labels, subnetName, "") - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.SubnetNameLabel, subnetName) - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.NodeNameLabel, pod.Spec.NodeName) - if !f.VersionPriorTo(1, 13) { - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.IPReservedLabel, "false") - } - - ginkgo.By("Retrieving pod routes") - podRoutes, err := iproute.RouteShow("", "", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) - }) - framework.ExpectNoError(err) - - ginkgo.By("Validating pod routes") - actualRoutes := make([]request.Route, 0, len(podRoutes)) - for _, r := range podRoutes { - if r.Gateway != "" || r.Dst != "" { - actualRoutes = append(actualRoutes, request.Route{Destination: r.Dst, Gateway: r.Gateway}) - } - } - ipv4CIDR, ipv6CIDR := util.SplitStringIP(pod.Annotations[util.CidrAnnotation]) - ipv4Gateway, ipv6Gateway := util.SplitStringIP(pod.Annotations[util.GatewayAnnotation]) - nadIPv4CIDR, nadIPv6CIDR := util.SplitStringIP(subnet.Spec.CIDRBlock) - nadIPv4Gateway, nadIPv6Gateway := util.SplitStringIP(subnet.Spec.Gateway) - if f.HasIPv4() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv4CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv4Gateway}) - } - if f.HasIPv6() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv6CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv6Gateway}) - } - }) - - framework.ConformanceIt("should be able to provide IPAM with custom routes for macvlan", func() { - provider := fmt.Sprintf("%s.%s", nadName, namespaceName) - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - subnet.Spec.Provider = provider - subnet = subnetClient.CreateSync(subnet) - - ginkgo.By("Constructing network attachment definition config") - var routeDst string - for i := 0; i < 3; i++ { - routeDst = framework.RandomCIDR(f.ClusterIPFamily) - if routeDst != subnet.Spec.CIDRBlock { - break - } - } - framework.ExpectNotEqual(routeDst, subnet.Spec.CIDRBlock) - routeGw := framework.RandomIPs(subnet.Spec.CIDRBlock, "", 1) - nadIPv4Gateway, nadIPv6Gateway := util.SplitStringIP(subnet.Spec.Gateway) - ipv4RouteDst, ipv6RouteDst := util.SplitStringIP(routeDst) - ipv4RouteGw, ipv6RouteGw := util.SplitStringIP(routeGw) - routes := make([]request.Route, 0, 4) - if f.HasIPv4() { - routes = append(routes, request.Route{Gateway: nadIPv4Gateway}) - routes = append(routes, request.Route{Destination: ipv4RouteDst, Gateway: ipv4RouteGw}) - } - if f.HasIPv6() { - routes = append(routes, request.Route{Gateway: nadIPv6Gateway}) - routes = append(routes, 
request.Route{Destination: ipv6RouteDst, Gateway: ipv6RouteGw}) - } - - ginkgo.By("Creating network attachment definition " + nadName) - nad := framework.MakeMacvlanNetworkAttachmentDefinition(nadName, namespaceName, "eth0", "bridge", provider, routes) - nad = nadClient.Create(nad) - framework.Logf("created network attachment definition config:\n%s", nad.Spec.Config) - - ginkgo.By("Creating pod " + podName) - annotations := map[string]string{nadv1.NetworkAttachmentAnnot: fmt.Sprintf("%s/%s", nad.Namespace, nad.Name)} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod = podClient.CreateSync(pod) - - ginkgo.By("Validating pod annotations") - framework.ExpectHaveKey(pod.Annotations, nadv1.NetworkStatusAnnot) - framework.Logf("pod network status:\n%s", pod.Annotations[nadv1.NetworkStatusAnnot]) - cidr := pod.Annotations[fmt.Sprintf(util.CidrAnnotationTemplate, provider)] - ip := pod.Annotations[fmt.Sprintf(util.IPAddressAnnotationTemplate, provider)] - gateway := pod.Annotations[fmt.Sprintf(util.GatewayAnnotationTemplate, provider)] - framework.ExpectIPInCIDR(ip, cidr) - framework.ExpectIPInCIDR(gateway, cidr) - framework.ExpectNotHaveKey(pod.Annotations, fmt.Sprintf(util.MacAddressAnnotationTemplate, provider)) - - ipName := ovs.PodNameToPortName(podName, namespaceName, provider) - ginkgo.By("Validating IP resource " + ipName) - ipCR := ipClient.Get(ipName) - framework.ExpectEqual(ipCR.Spec.Subnet, subnetName) - framework.ExpectEqual(ipCR.Spec.PodName, podName) - framework.ExpectEqual(ipCR.Spec.Namespace, namespaceName) - framework.ExpectEqual(ipCR.Spec.NodeName, pod.Spec.NodeName) - framework.ExpectEqual(ipCR.Spec.IPAddress, ip) - framework.ExpectEmpty(ipCR.Spec.MacAddress) - ipv4, ipv6 := util.SplitStringIP(ip) - framework.ExpectEqual(ipCR.Spec.V4IPAddress, ipv4) - framework.ExpectEqual(ipCR.Spec.V6IPAddress, ipv6) - framework.ExpectHaveKeyWithValue(ipCR.Labels, subnetName, "") - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.SubnetNameLabel, subnetName) - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.NodeNameLabel, pod.Spec.NodeName) - if !f.VersionPriorTo(1, 13) { - framework.ExpectHaveKeyWithValue(ipCR.Labels, util.IPReservedLabel, "false") - } - - ginkgo.By("Retrieving pod routes") - podRoutes, err := iproute.RouteShow("", "", func(cmd ...string) ([]byte, []byte, error) { - return framework.KubectlExec(namespaceName, podName, cmd...) 
- }) - framework.ExpectNoError(err) - - ginkgo.By("Validating pod routes") - actualRoutes := make([]request.Route, 0, len(podRoutes)) - for _, r := range podRoutes { - if r.Gateway != "" || r.Dst != "" { - actualRoutes = append(actualRoutes, request.Route{Destination: r.Dst, Gateway: r.Gateway}) - } - } - ipv4CIDR, ipv6CIDR := util.SplitStringIP(pod.Annotations[util.CidrAnnotation]) - ipv4Gateway, ipv6Gateway := util.SplitStringIP(pod.Annotations[util.GatewayAnnotation]) - nadIPv4CIDR, nadIPv6CIDR := util.SplitStringIP(subnet.Spec.CIDRBlock) - if f.HasIPv4() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv4CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv4Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv4RouteDst, Gateway: ipv4RouteGw}) - } - if f.HasIPv6() { - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6CIDR}) - framework.ExpectNotContainElement(actualRoutes, request.Route{Destination: "default", Gateway: ipv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: nadIPv6CIDR}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: "default", Gateway: nadIPv6Gateway}) - framework.ExpectContainElement(actualRoutes, request.Route{Destination: ipv6RouteDst, Gateway: ipv6RouteGw}) - } - }) -}) diff --git a/test/e2e/ovn-ic/e2e_test.go b/test/e2e/ovn-ic/e2e_test.go deleted file mode 100644 index 4f203a3f0cb..00000000000 --- a/test/e2e/ovn-ic/e2e_test.go +++ /dev/null @@ -1,312 +0,0 @@ -package ovn_ic - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "math/rand/v2" - "net" - "os/exec" - "strconv" - "strings" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - k8stypes "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" - e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -var clusters []string - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. 
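- // CopyFlags and the Register* helpers wire the upstream Kubernetes e2e framework flags (kubeconfig, cluster context, etc.) into this binary's flag set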
- config.CopyFlags(config.Flags, flag.CommandLine)
- k8sframework.RegisterCommonFlags(flag.CommandLine)
- k8sframework.RegisterClusterFlags(flag.CommandLine)
-}
-
-func TestE2E(t *testing.T) {
- var err error
- if clusters, err = kind.ListClusters(); err != nil {
- t.Fatalf("failed to list kind clusters: %v", err)
- }
- if len(clusters) < 2 {
- t.Fatal("not enough kind clusters to run ovn-ic e2e tests")
- }
-
- k8sframework.AfterReadingAllFlags(&k8sframework.TestContext)
- e2e.RunE2ETests(t)
-}
-
-func execOrDie(kubeContext, cmd string) string {
- ginkgo.GinkgoHelper()
-
- ginkgo.By(`Switching context to ` + kubeContext)
- e2ekubectl.NewKubectlCommand("", "config", "use-context", kubeContext).ExecOrDie("")
-
- ginkgo.By(`Executing "kubectl ` + cmd + `"`)
- return e2ekubectl.NewKubectlCommand("", strings.Fields(cmd)...).ExecOrDie("")
-}
-
-func execPodOrDie(kubeContext, namespace, pod, cmd string) string {
- ginkgo.GinkgoHelper()
-
- ginkgo.By(`Switching context to ` + kubeContext)
- e2ekubectl.NewKubectlCommand("", "config", "use-context", kubeContext).ExecOrDie("")
-
- ginkgo.By(fmt.Sprintf(`Executing %q in pod %s/%s`, cmd, namespace, pod))
- return e2epodoutput.RunHostCmdOrDie(namespace, pod, cmd)
-}
-
-var _ = framework.OrderedDescribe("[group:ovn-ic]", func() {
- frameworks := make([]*framework.Framework, len(clusters))
- for i := range clusters {
- frameworks[i] = framework.NewFrameworkWithContext("ovn-ic", "kind-"+clusters[i])
- }
-
- clientSets := make([]clientset.Interface, len(clusters))
- podClients := make([]*framework.PodClient, len(clusters))
- namespaceNames := make([]string, len(clusters))
- var kubectlConfig string
- ginkgo.BeforeEach(func() {
- for i := range clusters {
- clientSets[i] = frameworks[i].ClientSet
- podClients[i] = frameworks[i].PodClient()
- namespaceNames[i] = frameworks[i].Namespace.Name
- }
- kubectlConfig = k8sframework.TestContext.KubeConfig
- k8sframework.TestContext.KubeConfig = ""
- })
- ginkgo.AfterEach(func() {
- k8sframework.TestContext.KubeConfig = kubectlConfig
- })
-
- fnCheckPodHTTP := func() {
- ginkgo.GinkgoHelper()
-
- podNames := make([]string, len(clusters))
- pods := make([]*corev1.Pod, len(clusters))
- ports := make([]string, len(clusters))
- for i := range clusters {
- podNames[i] = "pod-" + framework.RandomSuffix()
- ginkgo.By("Creating pod " + podNames[i] + " in cluster " + clusters[i])
- port := 8000 + rand.Int32N(1000)
- ports[i] = strconv.Itoa(int(port))
- args := []string{"netexec", "--http-port", ports[i]}
- pods[i] = framework.MakePod(namespaceNames[i], podNames[i], nil, nil, framework.AgnhostImage, nil, args)
- pods[i].Spec.Containers[0].ReadinessProbe = &corev1.Probe{
- ProbeHandler: corev1.ProbeHandler{
- HTTPGet: &corev1.HTTPGetAction{
- Port: intstr.FromInt32(port),
- },
- },
- }
- pods[i] = podClients[i].CreateSync(pods[i])
- }
-
- for i := range clusters {
- sourceIPs := util.PodIPs(*pods[i])
- for j := range clusters {
- if j == i {
- continue
- }
-
- for _, podIP := range pods[j].Status.PodIPs {
- ip := podIP.IP
- protocol := strings.ToLower(util.CheckProtocol(ip))
- ginkgo.By("Checking connection from cluster " + clusters[i] + " to cluster " + clusters[j] + " via " + protocol)
- cmd := fmt.Sprintf("curl -q -s --connect-timeout 2 --max-time 2 %s/clientip", net.JoinHostPort(ip, ports[j]))
- output := execPodOrDie(frameworks[i].KubeContext, pods[i].Namespace, pods[i].Name, cmd)
- client, _, err := net.SplitHostPort(strings.TrimSpace(output))
- framework.ExpectNoError(err)
- framework.ExpectContainElement(sourceIPs,
client)
- }
- }
- }
- }
-
- framework.ConformanceIt("should create logical switch ts", func() {
- azNames := make([]string, len(clusters))
- for i := range clusters {
- ginkgo.By("Fetching the ConfigMap in cluster " + clusters[i])
- cm, err := clientSets[i].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.TODO(), util.InterconnectionConfig, metav1.GetOptions{})
- framework.ExpectNoError(err, "failed to get ConfigMap")
- azNames[i] = cm.Data["az-name"]
- }
-
- for i := range clusters {
- ginkgo.By("Ensuring logical switch ts exists in cluster " + clusters[i])
- output := execOrDie(frameworks[i].KubeContext, "ko nbctl show ts")
- for _, az := range azNames {
- framework.ExpectTrue(strings.Contains(output, "ts-"+az), "should have lsp ts-"+az)
- }
- }
- })
-
- framework.ConformanceIt("should be able to communicate between clusters", func() {
- fnCheckPodHTTP()
- })
-
- framework.ConformanceIt("should be able to update az name", func() {
- frameworks[0].SkipVersionPriorTo(1, 11, "This feature was introduced in v1.11")
-
- azNames := make([]string, len(clusters))
- for i := range clusters {
- ginkgo.By("Fetching the ConfigMap in cluster " + clusters[i])
- cm, err := clientSets[i].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.TODO(), util.InterconnectionConfig, metav1.GetOptions{})
- framework.ExpectNoError(err, "failed to get ConfigMap")
- azNames[i] = cm.Data["az-name"]
- }
-
- azNames[0] = fmt.Sprintf("az%04d", rand.IntN(10000))
- configMapPatchPayload, err := json.Marshal(corev1.ConfigMap{
- Data: map[string]string{
- "az-name": azNames[0],
- },
- })
- framework.ExpectNoError(err, "failed to marshal patch data")
-
- ginkgo.By("Patching the ConfigMap in cluster " + clusters[0])
- _, err = clientSets[0].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Patch(context.TODO(), util.InterconnectionConfig, k8stypes.StrategicMergePatchType, configMapPatchPayload, metav1.PatchOptions{})
- framework.ExpectNoError(err, "failed to patch ConfigMap")
-
- ginkgo.By("Waiting for new az names to be applied")
- time.Sleep(10 * time.Second)
-
- pods, err := clientSets[0].CoreV1().Pods(framework.KubeOvnNamespace).List(context.TODO(), metav1.ListOptions{LabelSelector: "app=ovs"})
- framework.ExpectNoError(err, "failed to get ovs-ovn pods")
- cmd := "ovn-appctl -t ovn-controller inc-engine/recompute"
- for _, pod := range pods.Items {
- execPodOrDie(frameworks[0].KubeContext, pod.Namespace, pod.Name, cmd)
- }
- time.Sleep(2 * time.Second)
-
- ginkgo.By("Ensuring logical switch ts exists in cluster " + clusters[0])
- output := execOrDie(frameworks[0].KubeContext, "ko nbctl show ts")
- for _, az := range azNames {
- lsp := "ts-" + az
- framework.ExpectTrue(strings.Contains(output, lsp), "should have lsp "+lsp)
- }
-
- fnCheckPodHTTP()
- })
-
- framework.ConformanceIt("should be able to update gateway to ecmp or HA", func() {
- frameworks[0].SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13")
- gwNodes := make([]string, len(clusters))
- for i := range clusters {
- ginkgo.By("Fetching the ConfigMap in cluster " + clusters[i])
- cm, err := clientSets[i].CoreV1().ConfigMaps(framework.KubeOvnNamespace).Get(context.TODO(), util.InterconnectionConfig, metav1.GetOptions{})
- framework.ExpectNoError(err, "failed to get ConfigMap")
- gwNodes[i] = cm.Data["gw-nodes"]
- }
-
- ginkgo.By("Case 1: Changing the ConfigMap in cluster to HA")
- changeGatewayType("ha", gwNodes, clientSets)
- ginkgo.By("Waiting for HA
gateway to be applied") - time.Sleep(15 * time.Second) - - checkECMPCount(0) - fnCheckPodHTTP() - - ginkgo.By("Case 2: Changing the ConfigMap in cluster to ecmp ") - changeGatewayType("ecmp", gwNodes, clientSets) - ginkgo.By("Waiting for ecmp gateway to be applied") - time.Sleep(15 * time.Second) - if frameworks[0].ClusterIPFamily == "dual" { - checkECMPCount(6) - } else { - checkECMPCount(3) - } - fnCheckPodHTTP() - - ginkgo.By("Case 3: Changing the ConfigMap in cluster to ha + ecmp") - changeGatewayType("half", gwNodes, clientSets) - ginkgo.By("Waiting for half gateway to be applied") - time.Sleep(15 * time.Second) - - if frameworks[0].ClusterIPFamily == "dual" { - checkECMPCount(4) - } else { - checkECMPCount(2) - } - fnCheckPodHTTP() - }) -}) - -func checkECMPCount(expectCount int) { - ginkgo.GinkgoHelper() - - ecmpCount := 0 - maxRetryTimes := 30 - - for _, cluster := range clusters { - clusterName := "kind-" + cluster - ginkgo.By("Switching kubectl config context to " + clusterName) - switchCmd := "kubectl config use-context " + clusterName - _, err := exec.Command("bash", "-c", switchCmd).CombinedOutput() - framework.ExpectNoError(err, "failed to switch kubectl config context to %s", clusterName) - - ginkgo.By("Checking logical router route count") - for i := 0; i < maxRetryTimes; i++ { - time.Sleep(3 * time.Second) - cmd := "ovn-nbctl lr-route-list " + util.DefaultVpc - output, _, err := framework.NBExec(cmd) - framework.ExpectNoError(err) - ecmpCount = strings.Count(string(output), "ecmp") - if ecmpCount == expectCount { - break - } - } - framework.ExpectEqual(ecmpCount, expectCount) - } - - switchCmd := "kubectl config use-context kind-kube-ovn" - _, err := exec.Command("bash", "-c", switchCmd).CombinedOutput() - framework.ExpectNoError(err, "switch to kube-ovn cluster failed") -} - -func changeGatewayType(gatewayType string, gwNodes []string, clientSets []clientset.Interface) { - for index, clientSet := range clientSets { - var gatewayStr string - switch gatewayType { - case "ha": - gatewayStr = strings.ReplaceAll(gwNodes[index], ";", ",") - case "ecmp": - gatewayStr = strings.ReplaceAll(gwNodes[index], ",", ";") - case "half": - gatewayStr = gwNodes[index] - } - framework.Logf("check gatewayStr %s ", gatewayStr) - configMapPatchPayload, err := json.Marshal(corev1.ConfigMap{ - Data: map[string]string{ - "gw-nodes": gatewayStr, - }, - }) - - framework.ExpectNoError(err, "failed to marshal patch data") - - ginkgo.By("patching the ConfigMap in cluster " + clusters[index]) - _, err = clientSet.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Patch(context.TODO(), util.InterconnectionConfig, k8stypes.StrategicMergePatchType, configMapPatchPayload, metav1.PatchOptions{}) - framework.ExpectNoError(err, "failed to patch ConfigMap") - } -} diff --git a/test/e2e/ovn-vpc-nat-gw/e2e_test.go b/test/e2e/ovn-vpc-nat-gw/e2e_test.go deleted file mode 100644 index 21f0bb49c02..00000000000 --- a/test/e2e/ovn-vpc-nat-gw/e2e_test.go +++ /dev/null @@ -1,1041 +0,0 @@ -package ovn_eip - -import ( - "context" - "flag" - "fmt" - "strconv" - "strings" - "testing" - "time" - - dockernetwork "github.com/docker/docker/api/types/network" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - - "github.com/onsi/ginkgo/v2" - - kubeovnv1 
"github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/ovs" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" - "github.com/kubeovn/kube-ovn/test/e2e/framework/docker" - "github.com/kubeovn/kube-ovn/test/e2e/framework/iproute" - "github.com/kubeovn/kube-ovn/test/e2e/framework/kind" -) - -const dockerNetworkName = "kube-ovn-vlan" - -const dockerExtraNetworkName = "kube-ovn-extra-vlan" - -func makeProviderNetwork(providerNetworkName string, exchangeLinkName bool, linkMap map[string]*iproute.Link) *kubeovnv1.ProviderNetwork { - var defaultInterface string - customInterfaces := make(map[string][]string, 0) - for node, link := range linkMap { - if !strings.ContainsRune(node, '-') { - continue - } - - if defaultInterface == "" { - defaultInterface = link.IfName - } else if link.IfName != defaultInterface { - customInterfaces[link.IfName] = append(customInterfaces[link.IfName], node) - } - } - - return framework.MakeProviderNetwork(providerNetworkName, exchangeLinkName, defaultInterface, customInterfaces, nil) -} - -func makeOvnEip(name, subnet, v4ip, v6ip, mac, usage string) *kubeovnv1.OvnEip { - return framework.MakeOvnEip(name, subnet, v4ip, v6ip, mac, usage) -} - -func makeOvnVip(namespaceName, name, subnet, v4ip, v6ip, vipType string) *kubeovnv1.Vip { - return framework.MakeVip(namespaceName, name, subnet, v4ip, v6ip, vipType) -} - -func makeOvnFip(name, ovnEip, ipType, ipName, vpc, v4Ip string) *kubeovnv1.OvnFip { - return framework.MakeOvnFip(name, ovnEip, ipType, ipName, vpc, v4Ip) -} - -func makeOvnSnat(name, ovnEip, vpcSubnet, ipName, vpc, v4IpCidr string) *kubeovnv1.OvnSnatRule { - return framework.MakeOvnSnatRule(name, ovnEip, vpcSubnet, ipName, vpc, v4IpCidr) -} - -func makeOvnDnat(name, ovnEip, ipType, ipName, vpc, v4Ip, internalPort, externalPort, protocol string) *kubeovnv1.OvnDnatRule { - return framework.MakeOvnDnatRule(name, ovnEip, ipType, ipName, vpc, v4Ip, internalPort, externalPort, protocol) -} - -var _ = framework.Describe("[group:ovn-vpc-nat-gw]", func() { - f := framework.NewDefaultFramework("ovn-vpc-nat-gw") - - var skip bool - var itFn func(bool, string, map[string]*iproute.Link, *[]string) - var cs clientset.Interface - var dockerNetwork, dockerExtraNetwork *dockernetwork.Inspect - var nodeNames, gwNodeNames, providerBridgeIps, extraProviderBridgeIps []string - var clusterName, providerNetworkName, vlanName, underlaySubnetName, noBfdVpcName, bfdVpcName, noBfdSubnetName, bfdSubnetName string - var providerExtraNetworkName, vlanExtraName, underlayExtraSubnetName, noBfdExtraSubnetName string - var linkMap, extraLinkMap map[string]*iproute.Link - var providerNetworkClient *framework.ProviderNetworkClient - var vlanClient *framework.VlanClient - var vpcClient *framework.VpcClient - var subnetClient *framework.SubnetClient - var ovnEipClient *framework.OvnEipClient - var ipClient *framework.IPClient - var vipClient *framework.VipClient - var ovnFipClient *framework.OvnFipClient - var ovnSnatRuleClient *framework.OvnSnatRuleClient - var ovnDnatRuleClient *framework.OvnDnatRuleClient - var podClient *framework.PodClient - var countingEipName, lrpEipSnatName, lrpExtraEipSnatName string - var fipName string - var ipDnatVipName, ipDnatEipName, ipDnatName string - var ipFipVipName, ipFipEipName, ipFipName string - var cidrSnatEipName, cidrSnatName, ipSnatVipName, ipSnatEipName, ipSnatName string - - var namespaceName string - - var sharedVipName, sharedEipDnatName, sharedEipFipShoudOkName, 
sharedEipFipShoudFailName string - var fipPodName, podEipName, podFipName string - var fipExtraPodName, podExtraEipName, podExtraFipName string - - ginkgo.BeforeEach(func() { - cs = f.ClientSet - subnetClient = f.SubnetClient() - vlanClient = f.VlanClient() - vpcClient = f.VpcClient() - providerNetworkClient = f.ProviderNetworkClient() - ovnEipClient = f.OvnEipClient() - ipClient = f.IPClient() - vipClient = f.VipClient() - ovnFipClient = f.OvnFipClient() - ovnSnatRuleClient = f.OvnSnatRuleClient() - ovnDnatRuleClient = f.OvnDnatRuleClient() - - podClient = f.PodClient() - - namespaceName = f.Namespace.Name - - gwNodeNum := 2 - // gw node is 2 means e2e HA cluster will have 2 gw nodes and a worker node - // in this env, tcpdump gw nat flows will be more clear - - noBfdVpcName = "no-bfd-vpc-" + framework.RandomSuffix() - bfdVpcName = "bfd-vpc-" + framework.RandomSuffix() - - // nats use ip crd name or vip crd - fipName = "fip-" + framework.RandomSuffix() - - countingEipName = "counting-eip-" + framework.RandomSuffix() - noBfdSubnetName = "no-bfd-subnet-" + framework.RandomSuffix() - noBfdExtraSubnetName = "no-bfd-extra-subnet-" + framework.RandomSuffix() - lrpEipSnatName = "lrp-eip-snat-" + framework.RandomSuffix() - lrpExtraEipSnatName = "lrp-extra-eip-snat-" + framework.RandomSuffix() - bfdSubnetName = "bfd-subnet-" + framework.RandomSuffix() - providerNetworkName = "external" - providerExtraNetworkName = "extra" - vlanName = "vlan-" + framework.RandomSuffix() - vlanExtraName = "vlan-extra-" + framework.RandomSuffix() - underlaySubnetName = "external" - underlayExtraSubnetName = "extra" - - // sharing case - sharedVipName = "shared-vip-" + framework.RandomSuffix() - sharedEipDnatName = "shared-eip-dnat-" + framework.RandomSuffix() - sharedEipFipShoudOkName = "shared-eip-fip-should-ok-" + framework.RandomSuffix() - sharedEipFipShoudFailName = "shared-eip-fip-should-fail-" + framework.RandomSuffix() - - // pod with fip - fipPodName = "fip-pod-" + framework.RandomSuffix() - podEipName = fipPodName - podFipName = fipPodName - - // pod with fip for extra external subnet - fipExtraPodName = "fip-extra-pod-" + framework.RandomSuffix() - podExtraEipName = fipExtraPodName - podExtraFipName = fipExtraPodName - - // fip use ip addr - ipFipVipName = "ip-fip-vip-" + framework.RandomSuffix() - ipFipEipName = "ip-fip-eip-" + framework.RandomSuffix() - ipFipName = "ip-fip-" + framework.RandomSuffix() - - // dnat use ip addr - ipDnatVipName = "ip-dnat-vip-" + framework.RandomSuffix() - ipDnatEipName = "ip-dnat-eip-" + framework.RandomSuffix() - ipDnatName = "ip-dnat-" + framework.RandomSuffix() - - // snat use ip cidr - cidrSnatEipName = "cidr-snat-eip-" + framework.RandomSuffix() - cidrSnatName = "cidr-snat-" + framework.RandomSuffix() - ipSnatVipName = "ip-snat-vip-" + framework.RandomSuffix() - ipSnatEipName = "ip-snat-eip-" + framework.RandomSuffix() - ipSnatName = "ip-snat-" + framework.RandomSuffix() - - if skip { - ginkgo.Skip("underlay spec only runs on kind clusters") - } - - if clusterName == "" { - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - cluster, ok := kind.IsKindProvided(k8sNodes.Items[0].Spec.ProviderID) - if !ok { - skip = true - ginkgo.Skip("underlay spec only runs on kind clusters") - } - clusterName = cluster - } - - if dockerNetwork == nil { - ginkgo.By("Ensuring docker network " + dockerNetworkName + " exists") - network, err := docker.NetworkCreate(dockerNetworkName, true, true) - 
framework.ExpectNoError(err, "creating docker network "+dockerNetworkName) - dockerNetwork = network - } - - if dockerExtraNetwork == nil { - ginkgo.By("Ensuring extra docker network " + dockerExtraNetworkName + " exists") - network, err := docker.NetworkCreate(dockerExtraNetworkName, true, true) - framework.ExpectNoError(err, "creating extra docker network "+dockerExtraNetworkName) - dockerExtraNetwork = network - } - - ginkgo.By("Getting kind nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - framework.ExpectNotEmpty(nodes) - - ginkgo.By("Connecting nodes to the docker network") - err = kind.NetworkConnect(dockerNetwork.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to network "+dockerNetworkName) - - ginkgo.By("Connecting nodes to the extra docker network") - err = kind.NetworkConnect(dockerExtraNetwork.ID, nodes) - framework.ExpectNoError(err, "connecting nodes to extra network "+dockerExtraNetworkName) - - ginkgo.By("Getting node links that belong to the docker network") - nodes, err = kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - - linkMap = make(map[string]*iproute.Link, len(nodes)) - extraLinkMap = make(map[string]*iproute.Link, len(nodes)) - nodeNames = make([]string, 0, len(nodes)) - gwNodeNames = make([]string, 0, gwNodeNum) - providerBridgeIps = make([]string, 0, len(nodes)) - extraProviderBridgeIps = make([]string, 0, len(nodes)) - - // node ext gw ovn eip name is the same as node name in this scenario - for index, node := range nodes { - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - for _, link := range links { - if link.Address == node.NetworkSettings.Networks[dockerNetworkName].MacAddress { - linkMap[node.ID] = &link - break - } - } - for _, link := range links { - if link.Address == node.NetworkSettings.Networks[dockerExtraNetworkName].MacAddress { - extraLinkMap[node.ID] = &link - break - } - } - framework.ExpectHaveKey(linkMap, node.ID) - framework.ExpectHaveKey(extraLinkMap, node.ID) - linkMap[node.Name()] = linkMap[node.ID] - extraLinkMap[node.Name()] = extraLinkMap[node.ID] - nodeNames = append(nodeNames, node.Name()) - if index < gwNodeNum { - gwNodeNames = append(gwNodeNames, node.Name()) - } - } - - itFn = func(exchangeLinkName bool, providerNetworkName string, linkMap map[string]*iproute.Link, bridgeIps *[]string) { - ginkgo.GinkgoHelper() - - ginkgo.By("Creating provider network " + providerNetworkName) - pn := makeProviderNetwork(providerNetworkName, exchangeLinkName, linkMap) - pn = providerNetworkClient.CreateSync(pn) - - ginkgo.By("Getting k8s nodes") - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - - ginkgo.By("Validating node labels") - for _, node := range k8sNodes.Items { - link := linkMap[node.Name] - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkInterfaceTemplate, providerNetworkName), link.IfName) - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkReadyTemplate, providerNetworkName), "true") - framework.ExpectHaveKeyWithValue(node.Labels, fmt.Sprintf(util.ProviderNetworkMtuTemplate, providerNetworkName), strconv.Itoa(link.Mtu)) - framework.ExpectNotHaveKey(node.Labels, fmt.Sprintf(util.ProviderNetworkExcludeTemplate, providerNetworkName)) - } - - ginkgo.By("Validating provider network spec") - 
framework.ExpectEqual(pn.Spec.ExchangeLinkName, false, "field .spec.exchangeLinkName should be false") - - ginkgo.By("Validating provider network status") - framework.ExpectEqual(pn.Status.Ready, true, "field .status.ready should be true") - framework.ExpectConsistOf(pn.Status.ReadyNodes, nodeNames) - framework.ExpectEmpty(pn.Status.Vlans) - - ginkgo.By("Getting kind nodes") - kindNodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err) - - ginkgo.By("Validating node links") - linkNameMap := make(map[string]string, len(kindNodes)) - bridgeName := util.ExternalBridgeName(providerNetworkName) - for _, node := range kindNodes { - if exchangeLinkName { - bridgeName = linkMap[node.ID].IfName - } - - links, err := node.ListLinks() - framework.ExpectNoError(err, "failed to list links on node %s: %v", node.Name(), err) - - var port, bridge *iproute.Link - for i, link := range links { - if link.IfIndex == linkMap[node.ID].IfIndex { - port = &links[i] - } else if link.IfName == bridgeName { - bridge = &links[i] - for _, addr := range bridge.NonLinkLocalAddresses() { - if util.CheckProtocol(addr) == kubeovnv1.ProtocolIPv4 { - ginkgo.By("get provider bridge v4 ip " + addr) - *bridgeIps = append(*bridgeIps, addr) - } - } - } - if port != nil && bridge != nil { - break - } - } - framework.ExpectNotNil(port) - framework.ExpectEqual(port.Address, linkMap[node.ID].Address) - framework.ExpectEqual(port.Mtu, linkMap[node.ID].Mtu) - framework.ExpectEqual(port.Master, "ovs-system") - framework.ExpectEqual(port.OperState, "UP") - if exchangeLinkName { - framework.ExpectEqual(port.IfName, util.ExternalBridgeName(providerNetworkName)) - } - - framework.ExpectNotNil(bridge) - framework.ExpectEqual(bridge.LinkInfo.InfoKind, "openvswitch") - framework.ExpectEqual(bridge.Address, port.Address) - framework.ExpectEqual(bridge.Mtu, port.Mtu) - framework.ExpectEqual(bridge.OperState, "UNKNOWN") - framework.ExpectContainElement(bridge.Flags, "UP") - - framework.ExpectEmpty(port.NonLinkLocalAddresses()) - framework.ExpectConsistOf(bridge.NonLinkLocalAddresses(), linkMap[node.ID].NonLinkLocalAddresses()) - - linkNameMap[node.ID] = port.IfName - } - } - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Deleting ovn fip " + fipName) - ovnFipClient.DeleteSync(fipName) - // clean up share eip case resource - ginkgo.By("Deleting share ovn dnat " + sharedEipDnatName) - ovnDnatRuleClient.DeleteSync(sharedEipDnatName) - ginkgo.By("Deleting share ovn fip " + sharedEipFipShoudOkName) - ovnFipClient.DeleteSync(sharedEipFipShoudOkName) - ginkgo.By("Deleting share ovn fip " + sharedEipFipShoudFailName) - ovnFipClient.DeleteSync(sharedEipFipShoudFailName) - ginkgo.By("Deleting share ovn snat " + lrpEipSnatName) - ovnSnatRuleClient.DeleteSync(lrpEipSnatName) - ginkgo.By("Deleting share ovn snat " + lrpExtraEipSnatName) - ovnSnatRuleClient.DeleteSync(lrpExtraEipSnatName) - - // clean up nats with ip or ip cidr - ginkgo.By("Deleting ovn dnat " + ipDnatName) - ovnDnatRuleClient.DeleteSync(ipDnatName) - ginkgo.By("Deleting ovn snat " + ipSnatName) - ovnSnatRuleClient.DeleteSync(ipSnatName) - ginkgo.By("Deleting ovn fip " + ipFipName) - ovnFipClient.DeleteSync(ipFipName) - ginkgo.By("Deleting ovn snat " + cidrSnatName) - ovnSnatRuleClient.DeleteSync(cidrSnatName) - - ginkgo.By("Deleting ovn eip " + ipFipEipName) - ovnFipClient.DeleteSync(ipFipEipName) - ginkgo.By("Deleting ovn eip " + ipDnatEipName) - ovnEipClient.DeleteSync(ipDnatEipName) - ginkgo.By("Deleting ovn eip " + ipSnatEipName) - ovnEipClient.DeleteSync(ipSnatEipName) 
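// Teardown order matters here: the fip/dnat/snat rules above are removed
// first so these OvnEips are unreferenced by the time they are deleted, and
// the vips the rules pointed at are released afterwards.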
- ginkgo.By("Deleting ovn eip " + cidrSnatEipName) - ovnEipClient.DeleteSync(cidrSnatEipName) - ginkgo.By("Deleting ovn eip " + ipFipEipName) - ovnEipClient.DeleteSync(ipFipEipName) - - ginkgo.By("Deleting ovn vip " + ipFipVipName) - vipClient.DeleteSync(ipFipVipName) - ginkgo.By("Deleting ovn vip " + ipDnatVipName) - vipClient.DeleteSync(ipDnatVipName) - ginkgo.By("Deleting ovn vip " + ipSnatVipName) - vipClient.DeleteSync(ipSnatVipName) - - ginkgo.By("Deleting ovn share vip " + sharedVipName) - vipClient.DeleteSync(sharedVipName) - - // clean fip pod - ginkgo.By("Deleting pod fip " + podFipName) - ovnFipClient.DeleteSync(podFipName) - ginkgo.By("Deleting pod with fip " + fipPodName) - podClient.DeleteSync(fipPodName) - ginkgo.By("Deleting pod eip " + podEipName) - ovnEipClient.DeleteSync(podEipName) - - // clean fip extra pod - ginkgo.By("Deleting pod fip " + podExtraFipName) - ovnFipClient.DeleteSync(podExtraFipName) - ginkgo.By("Deleting pod with fip " + fipExtraPodName) - podClient.DeleteSync(fipExtraPodName) - ginkgo.By("Deleting pod eip " + podExtraEipName) - ovnEipClient.DeleteSync(podExtraEipName) - - ginkgo.By("Deleting subnet " + noBfdSubnetName) - subnetClient.DeleteSync(noBfdSubnetName) - ginkgo.By("Deleting subnet " + noBfdExtraSubnetName) - subnetClient.DeleteSync(noBfdExtraSubnetName) - ginkgo.By("Deleting subnet " + bfdSubnetName) - subnetClient.DeleteSync(bfdSubnetName) - - ginkgo.By("Deleting no bfd custom vpc " + noBfdVpcName) - vpcClient.DeleteSync(noBfdVpcName) - ginkgo.By("Deleting bfd custom vpc " + bfdVpcName) - vpcClient.DeleteSync(bfdVpcName) - - ginkgo.By("Deleting underlay vlan subnet") - time.Sleep(1 * time.Second) - // wait 1s to make sure webhook allow delete subnet - ginkgo.By("Deleting underlay subnet " + underlaySubnetName) - subnetClient.DeleteSync(underlaySubnetName) - ginkgo.By("Deleting extra underlay subnet " + underlayExtraSubnetName) - subnetClient.DeleteSync(underlayExtraSubnetName) - - ginkgo.By("Deleting vlan " + vlanName) - vlanClient.Delete(vlanName, metav1.DeleteOptions{}) - ginkgo.By("Deleting extra vlan " + vlanExtraName) - vlanClient.Delete(vlanExtraName, metav1.DeleteOptions{}) - - ginkgo.By("Deleting provider network " + providerNetworkName) - providerNetworkClient.DeleteSync(providerNetworkName) - - ginkgo.By("Deleting provider extra network " + providerExtraNetworkName) - providerNetworkClient.DeleteSync(providerExtraNetworkName) - - ginkgo.By("Getting nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in cluster") - - ginkgo.By("Waiting for ovs bridge to disappear") - deadline := time.Now().Add(time.Minute) - for _, node := range nodes { - err = node.WaitLinkToDisappear(util.ExternalBridgeName(providerNetworkName), 2*time.Second, deadline) - framework.ExpectNoError(err, "timed out waiting for ovs bridge to disappear in node %s", node.Name()) - } - - if dockerNetwork != nil { - ginkgo.By("Disconnecting nodes from the docker network") - err = kind.NetworkDisconnect(dockerNetwork.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from network "+dockerNetworkName) - } - - if dockerExtraNetwork != nil { - ginkgo.By("Disconnecting nodes from the docker extra network") - err = kind.NetworkDisconnect(dockerExtraNetwork.ID, nodes) - framework.ExpectNoError(err, "disconnecting nodes from extra network "+dockerExtraNetworkName) - } - }) - - framework.ConformanceIt("Test ovn eip fip snat dnat", func() { - ginkgo.By("Getting docker network " + dockerNetworkName) - network, err := 
docker.NetworkInspect(dockerNetworkName) - framework.ExpectNoError(err, "getting docker network "+dockerNetworkName) - - exchangeLinkName := false - itFn(exchangeLinkName, providerNetworkName, linkMap, &providerBridgeIps) - - ginkgo.By("Creating underlay vlan " + vlanName) - vlan := framework.MakeVlan(vlanName, providerNetworkName, 0) - _ = vlanClient.Create(vlan) - - ginkgo.By("Creating underlay subnet " + underlaySubnetName) - var cidrV4, cidrV6, gatewayV4, gatewayV6 string - for _, config := range dockerNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case kubeovnv1.ProtocolIPv4: - if f.HasIPv4() { - cidrV4 = config.Subnet - gatewayV4 = config.Gateway - } - case kubeovnv1.ProtocolIPv6: - if f.HasIPv6() { - cidrV6 = config.Subnet - gatewayV6 = config.Gateway - } - } - } - cidr := make([]string, 0, 2) - gateway := make([]string, 0, 2) - if f.HasIPv4() { - cidr = append(cidr, cidrV4) - gateway = append(gateway, gatewayV4) - } - if f.HasIPv6() { - cidr = append(cidr, cidrV6) - gateway = append(gateway, gatewayV6) - } - excludeIPs := make([]string, 0, len(network.Containers)*2) - for _, container := range network.Containers { - if container.IPv4Address != "" && f.HasIPv4() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.HasIPv6() { - excludeIPs = append(excludeIPs, strings.Split(container.IPv6Address, "/")[0]) - } - } - vlanSubnetCidr := strings.Join(cidr, ",") - vlanSubnetGw := strings.Join(gateway, ",") - underlaySubnet := framework.MakeSubnet(underlaySubnetName, vlanName, vlanSubnetCidr, vlanSubnetGw, "", "", excludeIPs, nil, nil) - oldUnderlayExternalSubnet := subnetClient.CreateSync(underlaySubnet) - countingEip := makeOvnEip(countingEipName, underlaySubnetName, "", "", "", "") - _ = ovnEipClient.CreateSync(countingEip) - ginkgo.By("Checking underlay vlan " + oldUnderlayExternalSubnet.Name) - framework.ExpectEqual(oldUnderlayExternalSubnet.Spec.Vlan, vlanName) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Spec.CIDRBlock, "") - time.Sleep(3 * time.Second) - newUnerlayExternalSubnet := subnetClient.Get(underlaySubnetName) - ginkgo.By("Check status using ovn eip for subnet " + underlaySubnetName) - if newUnerlayExternalSubnet.Spec.Protocol == kubeovnv1.ProtocolIPv4 { - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V4AvailableIPs-1, newUnerlayExternalSubnet.Status.V4AvailableIPs) - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V4UsingIPs+1, newUnerlayExternalSubnet.Status.V4UsingIPs) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V4AvailableIPRange, newUnerlayExternalSubnet.Status.V4AvailableIPRange) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V4UsingIPRange, newUnerlayExternalSubnet.Status.V4UsingIPRange) - } else { - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V6AvailableIPs-1, newUnerlayExternalSubnet.Status.V6AvailableIPs) - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V6UsingIPs+1, newUnerlayExternalSubnet.Status.V6UsingIPs) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V6AvailableIPRange, newUnerlayExternalSubnet.Status.V6AvailableIPRange) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V6UsingIPRange, newUnerlayExternalSubnet.Status.V6UsingIPRange) - } - // delete counting eip - oldUnderlayExternalSubnet = newUnerlayExternalSubnet - ovnEipClient.DeleteSync(countingEipName) - time.Sleep(3 * time.Second) - newUnerlayExternalSubnet = subnetClient.Get(underlaySubnetName) - if 
newUnerlayExternalSubnet.Spec.Protocol == kubeovnv1.ProtocolIPv4 { - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V4AvailableIPs+1, newUnerlayExternalSubnet.Status.V4AvailableIPs) - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V4UsingIPs-1, newUnerlayExternalSubnet.Status.V4UsingIPs) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V4AvailableIPRange, newUnerlayExternalSubnet.Status.V4AvailableIPRange) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V4UsingIPRange, newUnerlayExternalSubnet.Status.V4UsingIPRange) - } else { - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V6AvailableIPs+1, newUnerlayExternalSubnet.Status.V6AvailableIPs) - framework.ExpectEqual(oldUnderlayExternalSubnet.Status.V6UsingIPs-1, newUnerlayExternalSubnet.Status.V6UsingIPs) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V6AvailableIPRange, newUnerlayExternalSubnet.Status.V6AvailableIPRange) - framework.ExpectNotEqual(oldUnderlayExternalSubnet.Status.V6UsingIPRange, newUnerlayExternalSubnet.Status.V6UsingIPRange) - } - - externalGwNodes := strings.Join(gwNodeNames, ",") - ginkgo.By("Creating config map ovn-external-gw-config for centralized case") - cmData := map[string]string{ - "enable-external-gw": "true", - "external-gw-nodes": externalGwNodes, - "type": kubeovnv1.GWCentralizedType, - "external-gw-nic": "eth1", - "external-gw-addr": strings.Join(cidr, ","), - } - configMap := &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.ExternalGatewayConfig, - Namespace: framework.KubeOvnNamespace, - }, - Data: cmData, - } - _, err = cs.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Create(context.Background(), configMap, metav1.CreateOptions{}) - framework.ExpectNoError(err, "failed to create") - - ginkgo.By("1. 
Test custom vpc nats using centralized external gw") - noBfdSubnetV4Cidr := "192.168.0.0/24" - noBfdSubnetV4Gw := "192.168.0.1" - enableExternal := true - disableBfd := false - noBfdVpc := framework.MakeVpc(noBfdVpcName, "", enableExternal, disableBfd, nil) - _ = vpcClient.CreateSync(noBfdVpc) - ginkgo.By("Creating overlay subnet " + noBfdSubnetName) - noBfdSubnet := framework.MakeSubnet(noBfdSubnetName, "", noBfdSubnetV4Cidr, noBfdSubnetV4Gw, noBfdVpcName, util.OvnProvider, nil, nil, nil) - _ = subnetClient.CreateSync(noBfdSubnet) - ginkgo.By("Creating pod on nodes") - for _, node := range nodeNames { - // create pod on gw node and worker node - podOnNodeName := fmt.Sprintf("no-bfd-%s", node) - ginkgo.By("Creating no bfd pod " + podOnNodeName + " with subnet " + noBfdSubnetName) - annotations := map[string]string{util.LogicalSwitchAnnotation: noBfdSubnetName} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podOnNodeName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod.Spec.NodeName = node - _ = podClient.CreateSync(pod) - } - - ginkgo.By("Creating pod with fip") - annotations := map[string]string{util.LogicalSwitchAnnotation: noBfdSubnetName} - cmd := []string{"sh", "-c", "sleep infinity"} - fipPod := framework.MakePod(namespaceName, fipPodName, nil, annotations, f.KubeOVNImage, cmd, nil) - fipPod = podClient.CreateSync(fipPod) - podEip := framework.MakeOvnEip(podEipName, underlaySubnetName, "", "", "", "") - _ = ovnEipClient.CreateSync(podEip) - fipPodIP := ovs.PodNameToPortName(fipPod.Name, fipPod.Namespace, noBfdSubnet.Spec.Provider) - podFip := framework.MakeOvnFip(podFipName, podEipName, "", fipPodIP, "", "") - podFip = ovnFipClient.CreateSync(podFip) - - ginkgo.By("1.1 Test fip dnat snat share eip by setting eip name and ip name") - ginkgo.By("Create snat, dnat, fip with the same vpc lrp eip") - noBfdlrpEipName := fmt.Sprintf("%s-%s", noBfdVpcName, underlaySubnetName) - noBfdLrpEip := ovnEipClient.Get(noBfdlrpEipName) - lrpEipSnat := framework.MakeOvnSnatRule(lrpEipSnatName, noBfdlrpEipName, noBfdSubnetName, "", "", "") - _ = ovnSnatRuleClient.CreateSync(lrpEipSnat) - ginkgo.By("Get lrp eip snat") - lrpEipSnat = ovnSnatRuleClient.Get(lrpEipSnatName) - ginkgo.By("Check share snat should has the external ip label") - framework.ExpectHaveKeyWithValue(lrpEipSnat.Labels, util.EipV4IpLabel, noBfdLrpEip.Spec.V4Ip) - - ginkgo.By("Creating share vip") - shareVip := framework.MakeVip(namespaceName, sharedVipName, noBfdSubnetName, "", "", "") - _ = vipClient.CreateSync(shareVip) - ginkgo.By("Creating the first ovn fip with share eip vip should be ok") - shareFipShouldOk := framework.MakeOvnFip(sharedEipFipShoudOkName, noBfdlrpEipName, util.Vip, sharedVipName, "", "") - _ = ovnFipClient.CreateSync(shareFipShouldOk) - ginkgo.By("Creating the second ovn fip with share eip vip should be failed") - shareFipShouldFail := framework.MakeOvnFip(sharedEipFipShoudFailName, noBfdlrpEipName, util.Vip, sharedVipName, "", "") - _ = ovnFipClient.Create(shareFipShouldFail) - ginkgo.By("Creating ovn dnat for dnat with share eip vip") - shareDnat := framework.MakeOvnDnatRule(sharedEipDnatName, noBfdlrpEipName, util.Vip, sharedVipName, "", "", "80", "8080", "tcp") - _ = ovnDnatRuleClient.CreateSync(shareDnat) - - ginkgo.By("Get shared lrp eip") - noBfdLrpEip = ovnEipClient.Get(noBfdlrpEipName) - ginkgo.By("Get share dnat") - shareDnat = ovnDnatRuleClient.Get(sharedEipDnatName) - - ginkgo.By("Get share fip should ok") - shareFipShouldOk = 
ovnFipClient.Get(sharedEipFipShoudOkName) - ginkgo.By("Get share fip should fail") - shareFipShouldFail = ovnFipClient.Get(sharedEipFipShoudFailName) - // check - ginkgo.By("Check share eip should has the external ip label") - framework.ExpectHaveKeyWithValue(noBfdLrpEip.Labels, util.EipV4IpLabel, noBfdLrpEip.Spec.V4Ip) - ginkgo.By("Check share dnat should has the external ip label") - framework.ExpectHaveKeyWithValue(shareDnat.Labels, util.EipV4IpLabel, noBfdLrpEip.Spec.V4Ip) - ginkgo.By("Check share fip should ok should has the external ip label") - framework.ExpectHaveKeyWithValue(shareFipShouldOk.Labels, util.EipV4IpLabel, noBfdLrpEip.Spec.V4Ip) - ginkgo.By("Check share fip should fail should not be ready") - framework.ExpectEqual(shareFipShouldFail.Status.Ready, false) - // make sure eip is shared - nats := []string{util.DnatUsingEip, util.FipUsingEip, util.SnatUsingEip} - framework.ExpectEqual(noBfdLrpEip.Status.Nat, strings.Join(nats, ",")) - // make sure vpc has normal external static routes - noBfdVpc = vpcClient.Get(noBfdVpcName) - for _, route := range noBfdVpc.Spec.StaticRoutes { - framework.ExpectEqual(route.RouteTable, util.MainRouteTable) - framework.ExpectEqual(route.Policy, kubeovnv1.PolicyDst) - framework.ExpectContainSubstring(vlanSubnetGw, route.NextHopIP) - } - - ginkgo.By("1.2 Test snat, fip external connectivity") - for _, node := range nodeNames { - // all the pods should ping lrp, node br-external ip successfully - podOnNodeName := fmt.Sprintf("no-bfd-%s", node) - pod := podClient.GetPod(podOnNodeName) - ginkgo.By("Test pod ping lrp eip " + noBfdlrpEipName) - command := fmt.Sprintf("ping -W 1 -c 1 %s", noBfdLrpEip.Status.V4Ip) - stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dest lrp ip %s\n", pod.Name, noBfdLrpEip.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - - ginkgo.By("Test pod ping pod fip " + podFip.Status.V4Eip) - command = fmt.Sprintf("ping -W 1 -c 1 %s", podFip.Status.V4Eip) - stdOutput, errOutput, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dst fip %s\n", pod.Name, noBfdLrpEip.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - - ginkgo.By("Test pod ping node provider bridge ip " + strings.Join(providerBridgeIps, ",")) - for _, ip := range providerBridgeIps { - command := fmt.Sprintf("ping -W 1 -c 1 %s", ip) - stdOutput, errOutput, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dest node ip %s\n", pod.Name, ip) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - } - - ginkgo.By("Getting docker extra network " + dockerExtraNetworkName) - extraNetwork, err := docker.NetworkInspect(dockerExtraNetworkName) - framework.ExpectNoError(err, "getting extra docker network "+dockerExtraNetworkName) - itFn(exchangeLinkName, providerExtraNetworkName, extraLinkMap, &extraProviderBridgeIps) - - ginkgo.By("Creating underlay 
extra vlan " + vlanExtraName) - vlan = framework.MakeVlan(vlanExtraName, providerExtraNetworkName, 0) - _ = vlanClient.Create(vlan) - - ginkgo.By("Creating extra underlay subnet " + underlayExtraSubnetName) - cidrV4, cidrV6, gatewayV4, gatewayV6 = "", "", "", "" - for _, config := range dockerExtraNetwork.IPAM.Config { - switch util.CheckProtocol(config.Subnet) { - case kubeovnv1.ProtocolIPv4: - if f.HasIPv4() { - cidrV4 = config.Subnet - gatewayV4 = config.Gateway - } - case kubeovnv1.ProtocolIPv6: - if f.HasIPv6() { - cidrV6 = config.Subnet - gatewayV6 = config.Gateway - } - } - } - cidr = make([]string, 0, 2) - gateway = make([]string, 0, 2) - if f.HasIPv4() { - cidr = append(cidr, cidrV4) - gateway = append(gateway, gatewayV4) - } - if f.HasIPv6() { - cidr = append(cidr, cidrV6) - gateway = append(gateway, gatewayV6) - } - - extraExcludeIPs := make([]string, 0, len(extraNetwork.Containers)*2) - for _, container := range extraNetwork.Containers { - if container.IPv4Address != "" && f.HasIPv4() { - extraExcludeIPs = append(extraExcludeIPs, strings.Split(container.IPv4Address, "/")[0]) - } - if container.IPv6Address != "" && f.HasIPv6() { - extraExcludeIPs = append(extraExcludeIPs, strings.Split(container.IPv6Address, "/")[0]) - } - } - extraVlanSubnetCidr := strings.Join(cidr, ",") - extraVlanSubnetGw := strings.Join(gateway, ",") - underlayExtraSubnet := framework.MakeSubnet(underlayExtraSubnetName, vlanExtraName, extraVlanSubnetCidr, extraVlanSubnetGw, "", "", extraExcludeIPs, nil, nil) - _ = subnetClient.CreateSync(underlayExtraSubnet) - vlanExtraSubnet := subnetClient.Get(underlayExtraSubnetName) - ginkgo.By("Checking extra underlay vlan " + vlanExtraSubnet.Name) - framework.ExpectEqual(vlanExtraSubnet.Spec.Vlan, vlanExtraName) - framework.ExpectNotEqual(vlanExtraSubnet.Spec.CIDRBlock, "") - - ginkgo.By("1.3 Test custom vpc nats using extra centralized external gw") - noBfdExtraSubnetV4Cidr := "192.168.3.0/24" - noBfdExtraSubnetV4Gw := "192.168.3.1" - - cachedVpc := vpcClient.Get(noBfdVpcName) - noBfdVpc = cachedVpc.DeepCopy() - noBfdVpc.Spec.ExtraExternalSubnets = append(noBfdVpc.Spec.ExtraExternalSubnets, underlayExtraSubnetName) - noBfdVpc.Spec.StaticRoutes = append(noBfdVpc.Spec.StaticRoutes, &kubeovnv1.StaticRoute{ - Policy: kubeovnv1.PolicySrc, - CIDR: noBfdExtraSubnetV4Cidr, - NextHopIP: gatewayV4, - }) - _, err = vpcClient.Update(context.Background(), noBfdVpc, metav1.UpdateOptions{}) - framework.ExpectNoError(err) - - ginkgo.By("Creating overlay subnet " + noBfdExtraSubnetName) - noBfdExtraSubnet := framework.MakeSubnet(noBfdExtraSubnetName, "", noBfdExtraSubnetV4Cidr, noBfdExtraSubnetV4Gw, noBfdVpcName, util.OvnProvider, nil, nil, nil) - _ = subnetClient.CreateSync(noBfdExtraSubnet) - - ginkgo.By("Creating pod on nodes") - for _, node := range nodeNames { - // create pod on gw node and worker node - podOnNodeName := fmt.Sprintf("no-bfd-extra-%s", node) - ginkgo.By("Creating no bfd extra pod " + podOnNodeName + " with subnet " + noBfdExtraSubnetName) - annotations := map[string]string{util.LogicalSwitchAnnotation: noBfdExtraSubnetName} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podOnNodeName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod.Spec.NodeName = node - _ = podClient.CreateSync(pod) - } - - ginkgo.By("Creating pod with fip") - annotations = map[string]string{util.LogicalSwitchAnnotation: noBfdExtraSubnetName} - fipPod = framework.MakePod(namespaceName, fipExtraPodName, nil, annotations, f.KubeOVNImage, cmd, nil) - 
fipPod = podClient.CreateSync(fipPod) - podEip = framework.MakeOvnEip(podExtraEipName, underlayExtraSubnetName, "", "", "", "") - _ = ovnEipClient.CreateSync(podEip) - fipPodIP = ovs.PodNameToPortName(fipPod.Name, fipPod.Namespace, noBfdExtraSubnet.Spec.Provider) - podFip = framework.MakeOvnFip(podExtraFipName, podExtraEipName, "", fipPodIP, "", "") - podFip = ovnFipClient.CreateSync(podFip) - - ginkgo.By("Create snat, dnat, fip for extra centralized external gw") - noBfdlrpEipName = fmt.Sprintf("%s-%s", noBfdVpcName, underlayExtraSubnetName) - noBfdLrpEip = ovnEipClient.Get(noBfdlrpEipName) - lrpEipSnat = framework.MakeOvnSnatRule(lrpExtraEipSnatName, noBfdlrpEipName, noBfdExtraSubnetName, "", "", "") - _ = ovnSnatRuleClient.CreateSync(lrpEipSnat) - ginkgo.By("Get lrp eip snat") - lrpEipSnat = ovnSnatRuleClient.Get(lrpExtraEipSnatName) - ginkgo.By("Check share snat should has the external ip label") - framework.ExpectHaveKeyWithValue(lrpEipSnat.Labels, util.EipV4IpLabel, noBfdLrpEip.Spec.V4Ip) - - ginkgo.By("1.4 Test snat, fip extra external connectivity") - for _, node := range nodeNames { - // all the pods should ping lrp, node br-external ip successfully - podOnNodeName := fmt.Sprintf("no-bfd-extra-%s", node) - pod := podClient.GetPod(podOnNodeName) - ginkgo.By("Test pod ping lrp eip " + noBfdlrpEipName) - command := fmt.Sprintf("ping -W 1 -c 1 %s", noBfdLrpEip.Status.V4Ip) - stdOutput, errOutput, err := framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dest lrp ip %s\n", pod.Name, noBfdLrpEip.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - - ginkgo.By("Test pod ping pod fip " + podFip.Status.V4Eip) - command = fmt.Sprintf("ping -W 1 -c 1 %s", podFip.Status.V4Eip) - stdOutput, errOutput, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dst fip %s\n", pod.Name, noBfdLrpEip.Name) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - - ginkgo.By("Test pod ping node provider bridge ip " + strings.Join(extraProviderBridgeIps, ",")) - framework.Logf("Pod can not ping bridge ip through extra external subnet in Kind test") - for _, ip := range extraProviderBridgeIps { - command := fmt.Sprintf("ping -W 1 -c 1 %s", ip) - stdOutput, errOutput, err = framework.ExecShellInPod(context.Background(), f, pod.Namespace, pod.Name, command) - framework.Logf("output from exec on client pod %s dest node ip %s\n", pod.Name, ip) - if stdOutput != "" && err == nil { - framework.Logf("output:\n%s", stdOutput) - } - } - framework.Logf("exec %s failed err: %v, errOutput: %s, stdOutput: %s", command, err, errOutput, stdOutput) - } - - // nat with ip crd name and share the same external eip tests all passed - ginkgo.By("2. 
Test custom vpc with bfd route") - ginkgo.By("2.1 Test custom vpc dnat, fip, snat in traditonal way") - ginkgo.By("Create dnat, fip, snat with eip name and ip or ip cidr") - - for _, nodeName := range nodeNames { - ginkgo.By("Creating ovn node-ext-gw type eip on node " + nodeName) - eip := makeOvnEip(nodeName, underlaySubnetName, "", "", "", util.OvnEipTypeLSP) - _ = ovnEipClient.CreateSync(eip) - } - - // TODO:// ipv6, dual stack support - bfdSubnetV4Cidr := "192.168.1.0/24" - bfdSubnetV4Gw := "192.168.1.1" - enableBfd := true - bfdVpc := framework.MakeVpc(bfdVpcName, "", enableExternal, enableBfd, nil) - _ = vpcClient.CreateSync(bfdVpc) - ginkgo.By("Creating overlay subnet enable ecmp") - bfdSubnet := framework.MakeSubnet(bfdSubnetName, "", bfdSubnetV4Cidr, bfdSubnetV4Gw, bfdVpcName, util.OvnProvider, nil, nil, nil) - bfdSubnet.Spec.EnableEcmp = true - _ = subnetClient.CreateSync(bfdSubnet) - - // TODO:// support vip type allowed address pair while use security group - - ginkgo.By("Test ovn fip with eip name and ip") - ginkgo.By("Creating ovn vip " + ipFipVipName) - ipFipVip := makeOvnVip(namespaceName, ipFipVipName, bfdSubnetName, "", "", "") - ipFipVip = vipClient.CreateSync(ipFipVip) - framework.ExpectNotEmpty(ipFipVip.Status.V4ip) - ginkgo.By("Creating ovn eip " + ipFipEipName) - ipFipEip := makeOvnEip(ipFipEipName, underlaySubnetName, "", "", "", util.OvnEipTypeNAT) - ipFipEip = ovnEipClient.CreateSync(ipFipEip) - framework.ExpectNotEmpty(ipFipEip.Status.V4Ip) - ginkgo.By("Creating ovn fip " + ipFipName) - ipFip := makeOvnFip(fipName, ipFipEipName, "", "", bfdVpcName, ipFipVip.Status.V4ip) - ipFip = ovnFipClient.CreateSync(ipFip) - framework.ExpectEqual(ipFip.Status.V4Eip, ipFipEip.Status.V4Ip) - framework.ExpectNotEmpty(ipFip.Status.V4Ip) - - ginkgo.By("Test ovn dnat with eip name and ip") - ginkgo.By("Creating ovn vip " + ipDnatVipName) - ipDnatVip := makeOvnVip(namespaceName, ipDnatVipName, bfdSubnetName, "", "", "") - ipDnatVip = vipClient.CreateSync(ipDnatVip) - framework.ExpectNotEmpty(ipDnatVip.Status.V4ip) - ginkgo.By("Creating ovn eip " + ipDnatEipName) - ipDnatEip := makeOvnEip(ipDnatEipName, underlaySubnetName, "", "", "", util.OvnEipTypeNAT) - ipDnatEip = ovnEipClient.CreateSync(ipDnatEip) - framework.ExpectNotEmpty(ipDnatEip.Status.V4Ip) - ginkgo.By("Creating ovn dnat " + ipDnatName) - ipDnat := makeOvnDnat(ipDnatName, ipDnatEipName, "", "", bfdVpcName, ipDnatVip.Status.V4ip, "80", "8080", "tcp") - ipDnat = ovnDnatRuleClient.CreateSync(ipDnat) - framework.ExpectEqual(ipDnat.Status.Vpc, bfdVpcName) - framework.ExpectEqual(ipDnat.Status.V4Eip, ipDnatEip.Status.V4Ip) - framework.ExpectEqual(ipDnat.Status.V4Ip, ipDnatVip.Status.V4ip) - - ginkgo.By("Test ovn snat with eip name and ip cidr") - ginkgo.By("Creating ovn eip " + cidrSnatEipName) - cidrSnatEip := makeOvnEip(cidrSnatEipName, underlaySubnetName, "", "", "", "") - cidrSnatEip = ovnEipClient.CreateSync(cidrSnatEip) - framework.ExpectNotEmpty(cidrSnatEip.Status.V4Ip) - ginkgo.By("Creating ovn snat mapping with subnet cidr" + bfdSubnetV4Cidr) - cidrSnat := makeOvnSnat(cidrSnatName, cidrSnatEipName, "", "", bfdVpcName, bfdSubnetV4Cidr) - cidrSnat = ovnSnatRuleClient.CreateSync(cidrSnat) - framework.ExpectEqual(cidrSnat.Status.Vpc, bfdVpcName) - framework.ExpectEqual(cidrSnat.Status.V4Eip, cidrSnatEip.Status.V4Ip) - framework.ExpectEqual(cidrSnat.Status.V4IpCidr, bfdSubnetV4Cidr) - - ginkgo.By("Test ovn snat with eip name and ip") - ginkgo.By("Creating ovn vip " + ipSnatVipName) - ipSnatVip := makeOvnVip(namespaceName, 
ipSnatVipName, bfdSubnetName, "", "", "") - ipSnatVip = vipClient.CreateSync(ipSnatVip) - framework.ExpectNotEmpty(ipSnatVip.Status.V4ip) - ginkgo.By("Creating ovn eip " + ipSnatEipName) - ipSnatEip := makeOvnEip(ipSnatEipName, underlaySubnetName, "", "", "", "") - ipSnatEip = ovnEipClient.CreateSync(ipSnatEip) - framework.ExpectNotEmpty(ipSnatEip.Status.V4Ip) - ginkgo.By("Creating ovn snat " + ipSnatName) - ipSnat := makeOvnSnat(ipSnatName, ipSnatEipName, "", "", bfdVpcName, ipSnatVip.Status.V4ip) - ipSnat = ovnSnatRuleClient.CreateSync(ipSnat) - framework.ExpectEqual(ipSnat.Status.Vpc, bfdVpcName) - framework.ExpectEqual(ipSnat.Status.V4IpCidr, ipSnatVip.Status.V4ip) - - k8sNodes, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - for _, node := range k8sNodes.Items { - // label should be true after setup node external gw - framework.ExpectHaveKeyWithValue(node.Labels, util.NodeExtGwLabel, "true") - } - // make sure vpc has bfd external static routes - bfdVpc = vpcClient.Get(bfdVpcName) - for _, route := range bfdVpc.Spec.StaticRoutes { - framework.ExpectEqual(route.RouteTable, util.MainRouteTable) - framework.ExpectEqual(route.ECMPMode, util.StaticRouteBfdEcmp) - framework.ExpectEqual(route.Policy, kubeovnv1.PolicySrc) - framework.ExpectNotEmpty(route.CIDR) - } - - for _, node := range nodeNames { - podOnNodeName := fmt.Sprintf("bfd-%s", node) - ginkgo.By("Creating bfd pod " + podOnNodeName + " with subnet " + bfdSubnetName) - annotations := map[string]string{util.LogicalSwitchAnnotation: bfdSubnetName} - cmd := []string{"sh", "-c", "sleep infinity"} - pod := framework.MakePod(namespaceName, podOnNodeName, nil, annotations, f.KubeOVNImage, cmd, nil) - pod.Spec.NodeName = node - _ = podClient.CreateSync(pod) - } - ginkgo.By("3. Updating config map ovn-external-gw-config for distributed case") - cmData = map[string]string{ - "enable-external-gw": "true", - "external-gw-nodes": externalGwNodes, - "type": kubeovnv1.GWDistributedType, - "external-gw-nic": "eth1", - "external-gw-addr": strings.Join(cidr, ","), - } - // TODO:// external-gw-nodes could be auto managed by recognizing gw chassis node which has the external-gw-nic - configMap = &corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: util.ExternalGatewayConfig, - Namespace: framework.KubeOvnNamespace, - }, - Data: cmData, - } - _, err = cs.CoreV1().ConfigMaps(framework.KubeOvnNamespace).Update(context.Background(), configMap, metav1.UpdateOptions{}) - framework.ExpectNoError(err, "failed to update ConfigMap") - - ginkgo.By("Getting kind nodes") - nodes, err := kind.ListNodes(clusterName, "") - framework.ExpectNoError(err, "getting nodes in kind cluster") - framework.ExpectNotEmpty(nodes) - ginkgo.By("4. Creating crd in distributed case") - for _, node := range nodeNames { - podOnNodeName := fmt.Sprintf("bfd-%s", node) - eipOnNodeName := fmt.Sprintf("eip-on-node-%s", node) - fipOnNodeName := fmt.Sprintf("fip-on-node-%s", node) - ipName := ovs.PodNameToPortName(podOnNodeName, namespaceName, bfdSubnet.Spec.Provider) - ginkgo.By("Get pod ip" + ipName) - ip := ipClient.Get(ipName) - ginkgo.By("Creating ovn eip " + eipOnNodeName) - eipOnNode := makeOvnEip(eipOnNodeName, underlaySubnetName, "", "", "", "") - _ = ovnEipClient.CreateSync(eipOnNode) - ginkgo.By("Creating ovn fip " + fipOnNodeName) - fip := makeOvnFip(fipOnNodeName, eipOnNodeName, "", ip.Name, "", "") - _ = ovnFipClient.CreateSync(fip) - } - // wait here to have an insight into all the ovn nat resources - ginkgo.By("5. 
Deleting pod") - for _, node := range nodeNames { - podOnNodeName := fmt.Sprintf("bfd-%s", node) - ginkgo.By("Deleting pod " + podOnNodeName) - podClient.DeleteSync(podOnNodeName) - podOnNodeName = fmt.Sprintf("no-bfd-%s", node) - ginkgo.By("Deleting pod " + podOnNodeName) - podClient.DeleteSync(podOnNodeName) - podOnNodeName = fmt.Sprintf("no-bfd-extra-%s", node) - ginkgo.By("Deleting pod " + podOnNodeName) - podClient.DeleteSync(podOnNodeName) - } - - ginkgo.By("6. Deleting crd in distributed case") - for _, node := range nodeNames { - ginkgo.By("Deleting node external gw ovn eip " + node) - ovnEipClient.DeleteSync(node) - podOnNodeName := fmt.Sprintf("on-node-%s", node) - eipOnNodeName := fmt.Sprintf("eip-on-node-%s", node) - fipOnNodeName := fmt.Sprintf("fip-on-node-%s", node) - ginkgo.By("Deleting node ovn fip " + fipOnNodeName) - ovnFipClient.DeleteSync(fipOnNodeName) - ginkgo.By("Deleting node ovn eip " + eipOnNodeName) - ovnEipClient.DeleteSync(eipOnNodeName) - ipName := ovs.PodNameToPortName(podOnNodeName, namespaceName, bfdSubnet.Spec.Provider) - ginkgo.By("Deleting pod ip" + ipName) - ipClient.DeleteSync(ipName) - } - - ginkgo.By("Disable ovn eip snat external gateway") - ginkgo.By("Deleting configmap") - err = cs.CoreV1().ConfigMaps(configMap.Namespace).Delete(context.Background(), configMap.Name, metav1.DeleteOptions{}) - framework.ExpectNoError(err, "failed to delete ConfigMap") - - lrpEipName := fmt.Sprintf("%s-%s", bfdVpcName, underlaySubnetName) - ginkgo.By("Deleting ovn eip " + lrpEipName) - ovnEipClient.DeleteSync(lrpEipName) - - defaultVpcLrpEipName := fmt.Sprintf("%s-%s", util.DefaultVpc, underlaySubnetName) - ginkgo.By("Deleting ovn eip " + defaultVpcLrpEipName) - ovnEipClient.DeleteSync(defaultVpcLrpEipName) - - k8sNodes, err = e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - time.Sleep(5 * time.Second) - for _, node := range k8sNodes.Items { - // label should be false after remove node external gw - framework.ExpectHaveKeyWithValue(node.Labels, util.NodeExtGwLabel, "false") - } - }) -}) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/security/e2e_test.go b/test/e2e/security/e2e_test.go deleted file mode 100644 index 6f3cf5daf17..00000000000 --- a/test/e2e/security/e2e_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package security - -import ( - "bytes" - "context" - "flag" - "fmt" - "net" - "strconv" - "strings" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - "k8s.io/kubernetes/test/e2e/framework/deployment" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. 
- config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} - -func checkDeployment(f *framework.Framework, name, process string, ports ...string) { - ginkgo.GinkgoHelper() - - ginkgo.By("Getting deployment " + name) - deploy, err := f.ClientSet.AppsV1().Deployments(framework.KubeOvnNamespace).Get(context.TODO(), name, metav1.GetOptions{}) - framework.ExpectNoError(err, "failed to to get deployment") - err = deployment.WaitForDeploymentComplete(f.ClientSet, deploy) - framework.ExpectNoError(err, "deployment failed to complete") - - ginkgo.By("Getting pods") - pods, err := deployment.GetPodsForDeployment(context.Background(), f.ClientSet, deploy) - framework.ExpectNoError(err, "failed to get pods") - framework.ExpectNotEmpty(pods.Items) - - checkPods(f, pods.Items, process, ports...) -} - -func checkPods(f *framework.Framework, pods []corev1.Pod, process string, ports ...string) { - ginkgo.GinkgoHelper() - - ginkgo.By("Parsing environment variable") - var envValue string - for _, env := range pods[0].Spec.Containers[0].Env { - if env.Name == "ENABLE_BIND_LOCAL_IP" { - envValue = env.Value - break - } - } - if envValue == "" { - envValue = "false" - } - listenPodIP, err := strconv.ParseBool(envValue) - framework.ExpectNoError(err) - - if listenPodIP && len(pods[0].Status.PodIPs) != 1 && (process != "ovsdb-server" || f.VersionPriorTo(1, 12)) { - // ovn db processes support listening on both ipv4 and ipv6 addresses in versions >= 1.12 - listenPodIP = false - } - - ginkgo.By("Validating " + process + " listen addresses") - cmd := fmt.Sprintf(`ss -Hntpl | grep -wE pid=$(pidof %s | sed "s/ /|pid=/g") | awk '{print $4}'`, process) - if len(ports) != 0 { - cmd += fmt.Sprintf(`| grep -E ':%s$'`, strings.Join(ports, `$|:`)) - } - for _, pod := range pods { - stdout, _, err := framework.KubectlExec(pod.Namespace, pod.Name, cmd) - framework.ExpectNoError(err) - - listenAddresses := strings.Split(string(bytes.TrimSpace(stdout)), "\n") - if len(ports) != 0 { - expected := make([]string, 0, len(pod.Status.PodIPs)*len(ports)) - for _, port := range ports { - if listenPodIP { - for _, ip := range pod.Status.PodIPs { - expected = append(expected, net.JoinHostPort(ip.IP, port)) - } - } else { - expected = append(expected, net.JoinHostPort("*", port)) - } - } - framework.ExpectConsistOf(listenAddresses, expected) - } else { - podIPPrefix := strings.TrimSuffix(net.JoinHostPort(pod.Status.PodIP, "999"), "999") - for _, addr := range listenAddresses { - if listenPodIP { - framework.ExpectTrue(strings.HasPrefix(addr, podIPPrefix)) - } else { - framework.ExpectTrue(strings.HasPrefix(addr, "*:")) - } - } - } - } -} - -var _ = framework.Describe("[group:security]", func() { - f := framework.NewDefaultFramework("security") - f.SkipNamespaceCreation = true - - var cs clientset.Interface - ginkgo.BeforeEach(func() { - f.SkipVersionPriorTo(1, 9, "Support for listening on Pod IP was introduced in v1.9") - cs = f.ClientSet - }) - - framework.ConformanceIt("ovn db should listen on specified addresses for client connections", func() { - checkDeployment(f, "ovn-central", "ovsdb-server", "6641", "6642") - }) - - framework.ConformanceIt("kube-ovn-controller should listen on specified addresses", func() { - checkDeployment(f, "kube-ovn-controller", "kube-ovn-controller") - }) - - 
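// These conformance cases all reuse checkDeployment/checkPods: list the
// workload's pods, run `ss -Hntpl` in each, and assert that every listening
// socket is bound to the pod IP when ENABLE_BIND_LOCAL_IP is true, or to
// the wildcard address otherwise.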
framework.ConformanceIt("kube-ovn-monitor should listen on specified addresses", func() { - checkDeployment(f, "kube-ovn-monitor", "kube-ovn-monitor") - }) - - framework.ConformanceIt("kube-ovn-cni should listen on specified addresses", func() { - ginkgo.By("Getting nodes") - nodeList, err := e2enode.GetReadySchedulableNodes(context.Background(), cs) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(nodeList.Items) - - ginkgo.By("Getting daemonset kube-ovn-cni") - daemonSetClient := f.DaemonSetClientNS(framework.KubeOvnNamespace) - ds := daemonSetClient.Get("kube-ovn-cni") - - ginkgo.By("Getting kube-ovn-cni pods") - pods := make([]corev1.Pod, 0, len(nodeList.Items)) - for _, node := range nodeList.Items { - pod, err := daemonSetClient.GetPodOnNode(ds, node.Name) - framework.ExpectNoError(err, "failed to get kube-ovn-cni pod running on node %s", node.Name) - pods = append(pods, *pod) - } - - checkPods(f, pods, "kube-ovn-daemon") - }) -}) diff --git a/test/e2e/vip/e2e_test.go b/test/e2e/vip/e2e_test.go deleted file mode 100644 index 453ba99c673..00000000000 --- a/test/e2e/vip/e2e_test.go +++ /dev/null @@ -1,397 +0,0 @@ -package vip - -import ( - "context" - "flag" - "fmt" - "sort" - "strings" - "testing" - "time" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - k8sframework "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -func makeOvnVip(namespaceName, name, subnet, v4ip, v6ip, vipType string) *apiv1.Vip { - return framework.MakeVip(namespaceName, name, subnet, v4ip, v6ip, vipType) -} - -func makeSecurityGroup(name string, allowSameGroupTraffic bool, ingressRules, egressRules []*apiv1.SgRule) *apiv1.SecurityGroup { - return framework.MakeSecurityGroup(name, allowSameGroupTraffic, ingressRules, egressRules) -} - -func testConnectivity(ip, namespaceName, srcPod, dstPod string, f *framework.Framework) { - ginkgo.GinkgoHelper() - - // other pods can communicate with the allow address pair pod through vip - var addIP, delIP, command string - switch util.CheckProtocol(ip) { - case apiv1.ProtocolIPv4: - addIP = fmt.Sprintf("ip addr add %s/24 dev eth0", ip) - delIP = fmt.Sprintf("ip addr del %s/24 dev eth0", ip) - command = fmt.Sprintf("ping -W 1 -c 1 %s", ip) - case apiv1.ProtocolIPv6: - addIP = fmt.Sprintf("ip addr add %s/96 dev eth0", ip) - delIP = fmt.Sprintf("ip addr del %s/96 dev eth0", ip) - command = fmt.Sprintf("ping6 -c 1 %s", ip) - default: - framework.Failf("unexpected ip address: %q", ip) - } - // check srcPod ping dstPod through vip - stdout, stderr, err := framework.ExecShellInPod(context.Background(), f, namespaceName, dstPod, addIP) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", addIP, err, stderr, stdout) - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, namespaceName, srcPod, command) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", command, err, stderr, stdout) - // srcPod can not ping dstPod vip when ip is deleted - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, namespaceName, dstPod, delIP) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", delIP, err, stderr, stdout) - _, _, err = framework.ExecShellInPod(context.Background(), f, namespaceName, srcPod, command) - framework.ExpectError(err) - // 
check dstPod ping srcPod through vip - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, namespaceName, srcPod, addIP) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", addIP, err, stderr, stdout) - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, namespaceName, dstPod, command) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", command, err, stderr, stdout) - // dstPod can not ping srcPod vip when ip is deleted - stdout, stderr, err = framework.ExecShellInPod(context.Background(), f, namespaceName, srcPod, delIP) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", delIP, err, stderr, stdout) - _, _, err = framework.ExecShellInPod(context.Background(), f, namespaceName, dstPod, command) - framework.ExpectError(err) -} - -func testVipWithSG(ip, namespaceName, allowPod, denyPod, aapPod, securityGroupName string, f *framework.Framework) { - ginkgo.GinkgoHelper() - - // check if security group working - var sgCheck, conditions string - switch util.CheckProtocol(ip) { - case apiv1.ProtocolIPv4: - sgCheck = fmt.Sprintf("ping -W 1 -c 1 %s", ip) - conditions = fmt.Sprintf("name=ovn.sg.%s.associated.v4", strings.ReplaceAll(securityGroupName, "-", ".")) - case apiv1.ProtocolIPv6: - sgCheck = fmt.Sprintf("ping6 -c 1 %s", ip) - conditions = fmt.Sprintf("name=ovn.sg.%s.associated.v6", strings.ReplaceAll(securityGroupName, "-", ".")) - } - // allowPod can ping aapPod with security group - stdout, stderr, err := framework.ExecShellInPod(context.Background(), f, namespaceName, allowPod, sgCheck) - framework.ExpectNoError(err, "exec %q failed, err: %q, stderr: %q, stdout: %q", sgCheck, err, stderr, stdout) - // denyPod can not ping aapPod with security group - _, _, err = framework.ExecShellInPod(context.Background(), f, namespaceName, denyPod, sgCheck) - framework.ExpectError(err) - - ginkgo.By("Checking ovn address_set and lsp port_security") - // address_set should have allow address pair ip - cmd := "ovn-nbctl --format=list --data=bare --no-heading --columns=addresses find Address_Set " + conditions - output, _, err := framework.NBExec(cmd) - framework.ExpectNoError(err) - addressSet := strings.Split(strings.ReplaceAll(string(output), "\n", ""), " ") - framework.ExpectContainElement(addressSet, ip) - // port_security should have allow address pair IP - cmd = fmt.Sprintf("ovn-nbctl --format=list --data=bare --no-heading --columns=port_security list Logical_Switch_Port %s.%s", aapPod, namespaceName) - output, _, err = framework.NBExec(cmd) - framework.ExpectNoError(err) - portSecurity := strings.Split(strings.ReplaceAll(string(output), "\n", ""), " ") - framework.ExpectContainElement(portSecurity, ip) - // TODO: Checking allow address pair connectivity with security group - // AAP does not work fine with security group in kind test env for now -} - -var _ = framework.Describe("[group:vip]", func() { - f := framework.NewDefaultFramework("vip") - - var vpcClient *framework.VpcClient - var subnetClient *framework.SubnetClient - var vipClient *framework.VipClient - var vpc *apiv1.Vpc - var subnet *apiv1.Subnet - var podClient *framework.PodClient - var securityGroupClient *framework.SecurityGroupClient - var namespaceName, vpcName, subnetName, cidr string - - // test switch lb vip, which ip is in the vpc subnet cidr - // switch lb vip use gw mac to trigger lb nat flows - var switchLbVip1Name, switchLbVip2Name string - - // test allowed address pair vip - var 
countingVipName, vip1Name, vip2Name, aapPodName1, aapPodName2, aapPodName3 string - - // test ipv6 vip - var lowerCaseStaticIpv6VipName, upperCaseStaticIpv6VipName, lowerCaseV6IP, upperCaseV6IP string - - // test allowed address pair connectivity in the security group scenario - var securityGroupName string - - ginkgo.BeforeEach(func() { - vpcClient = f.VpcClient() - subnetClient = f.SubnetClient() - vipClient = f.VipClient() - podClient = f.PodClient() - securityGroupClient = f.SecurityGroupClient() - namespaceName = f.Namespace.Name - cidr = framework.RandomCIDR(f.ClusterIPFamily) - - // should create lower case static ipv6 address vip in ovn-default - lowerCaseStaticIpv6VipName = "lower-case-static-ipv6-vip-" + framework.RandomSuffix() - lowerCaseV6IP = "fd00:10:16::a1" - // should not create upper case static ipv6 address vip in ovn-default - upperCaseStaticIpv6VipName = "Upper-Case-Static-Ipv6-Vip-" + framework.RandomSuffix() - upperCaseV6IP = "fd00:10:16::A1" - - // should have the same mac, which mac is the same as its vpc overlay subnet gw mac - randomSuffix := framework.RandomSuffix() - switchLbVip1Name = "switch-lb-vip1-" + randomSuffix - switchLbVip2Name = "switch-lb-vip2-" + randomSuffix - - // subnet status counting vip - countingVipName = "counting-vip-" + randomSuffix - - // should have different mac - vip1Name = "vip1-" + randomSuffix - vip2Name = "vip2-" + randomSuffix - - aapPodName1 = "pod1-" + randomSuffix - aapPodName2 = "pod2-" + randomSuffix - aapPodName3 = "pod3-" + randomSuffix - - securityGroupName = "sg-" + randomSuffix - - vpcName = "vpc-" + randomSuffix - subnetName = "subnet-" + randomSuffix - ginkgo.By("Creating vpc " + vpcName) - vpc = framework.MakeVpc(vpcName, "", false, false, []string{namespaceName}) - vpc = vpcClient.CreateSync(vpc) - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", vpcName, "", nil, nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Deleting switch lb vip " + switchLbVip1Name) - vipClient.DeleteSync(switchLbVip1Name) - ginkgo.By("Deleting switch lb vip " + switchLbVip2Name) - vipClient.DeleteSync(switchLbVip2Name) - ginkgo.By("Deleting allowed address pair vip " + vip1Name) - vipClient.DeleteSync(vip1Name) - ginkgo.By("Deleting allowed address pair vip " + vip2Name) - vipClient.DeleteSync(vip2Name) - - // clean fip pod - ginkgo.By("Deleting pod " + aapPodName1) - podClient.DeleteSync(aapPodName1) - ginkgo.By("Deleting pod " + aapPodName2) - podClient.DeleteSync(aapPodName2) - ginkgo.By("Deleting pod " + aapPodName3) - podClient.DeleteSync(aapPodName3) - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - ginkgo.By("Deleting vpc " + vpcName) - vpcClient.DeleteSync(vpcName) - // clean security group - ginkgo.By("Deleting security group " + securityGroupName) - securityGroupClient.DeleteSync(securityGroupName) - }) - - framework.ConformanceIt("Test vip", func() { - // f.SkipVersionPriorTo(1, 13, "This feature was introduced in v1.13") - ginkgo.By("0. 
Test subnet status counting vip") - oldSubnet := subnetClient.Get(subnetName) - countingVip := makeOvnVip(namespaceName, countingVipName, subnetName, "", "", "") - _ = vipClient.CreateSync(countingVip) - time.Sleep(3 * time.Second) - newSubnet := subnetClient.Get(subnetName) - if newSubnet.Spec.Protocol == apiv1.ProtocolIPv4 { - framework.ExpectEqual(oldSubnet.Status.V4AvailableIPs-1, newSubnet.Status.V4AvailableIPs) - framework.ExpectEqual(oldSubnet.Status.V4UsingIPs+1, newSubnet.Status.V4UsingIPs) - framework.ExpectNotEqual(oldSubnet.Status.V4AvailableIPRange, newSubnet.Status.V4AvailableIPRange) - framework.ExpectNotEqual(oldSubnet.Status.V4UsingIPRange, newSubnet.Status.V4UsingIPRange) - } else { - framework.ExpectEqual(oldSubnet.Status.V6AvailableIPs-1, newSubnet.Status.V6AvailableIPs) - framework.ExpectEqual(oldSubnet.Status.V6UsingIPs+1, newSubnet.Status.V6UsingIPs) - framework.ExpectNotEqual(oldSubnet.Status.V6AvailableIPRange, newSubnet.Status.V6AvailableIPRange) - framework.ExpectNotEqual(oldSubnet.Status.V6UsingIPRange, newSubnet.Status.V6UsingIPRange) - } - oldSubnet = newSubnet - // delete counting vip - vipClient.DeleteSync(countingVipName) - time.Sleep(3 * time.Second) - newSubnet = subnetClient.Get(subnetName) - if newSubnet.Spec.Protocol == apiv1.ProtocolIPv4 { - framework.ExpectEqual(oldSubnet.Status.V4AvailableIPs+1, newSubnet.Status.V4AvailableIPs) - framework.ExpectEqual(oldSubnet.Status.V4UsingIPs-1, newSubnet.Status.V4UsingIPs) - framework.ExpectNotEqual(oldSubnet.Status.V4AvailableIPRange, newSubnet.Status.V4AvailableIPRange) - framework.ExpectNotEqual(oldSubnet.Status.V4UsingIPRange, newSubnet.Status.V4UsingIPRange) - } else { - framework.ExpectEqual(oldSubnet.Status.V6AvailableIPs+1, newSubnet.Status.V6AvailableIPs) - framework.ExpectEqual(oldSubnet.Status.V6UsingIPs-1, newSubnet.Status.V6UsingIPs) - framework.ExpectNotEqual(oldSubnet.Status.V6AvailableIPRange, newSubnet.Status.V6AvailableIPRange) - framework.ExpectNotEqual(oldSubnet.Status.V6UsingIPRange, newSubnet.Status.V6UsingIPRange) - } - ginkgo.By("1. 
Test allowed address pair vip") - if f.IsIPv6() { - ginkgo.By("Should create lower case static ipv6 address vip " + lowerCaseStaticIpv6VipName) - lowerCaseStaticIpv6Vip := makeOvnVip(namespaceName, lowerCaseStaticIpv6VipName, util.DefaultSubnet, "", lowerCaseV6IP, "") - lowerCaseStaticIpv6Vip = vipClient.CreateSync(lowerCaseStaticIpv6Vip) - framework.ExpectEqual(lowerCaseStaticIpv6Vip.Status.V6ip, lowerCaseV6IP) - ginkgo.By("Should not create upper case static ipv6 address vip " + upperCaseStaticIpv6VipName) - upperCaseStaticIpv6Vip := makeOvnVip(namespaceName, upperCaseStaticIpv6VipName, util.DefaultSubnet, "", upperCaseV6IP, "") - _ = vipClient.Create(upperCaseStaticIpv6Vip) - time.Sleep(10 * time.Second) - upperCaseStaticIpv6Vip = vipClient.Get(upperCaseStaticIpv6VipName) - framework.ExpectEqual(upperCaseStaticIpv6Vip.Status.V6ip, "") - framework.ExpectFalse(upperCaseStaticIpv6Vip.Status.Ready) - } - // create vip1 and vip2, which should get different ips and macs - ginkgo.By("Creating allowed address pair vips, which should have different ips and macs") - ginkgo.By("Creating allowed address pair vip " + vip1Name) - vip1 := makeOvnVip(namespaceName, vip1Name, subnetName, "", "", "") - vip1 = vipClient.CreateSync(vip1) - - ginkgo.By("Creating allowed address pair vip " + vip2Name) - vip2 := makeOvnVip(namespaceName, vip2Name, subnetName, "", "", "") - vip2 = vipClient.CreateSync(vip2) - virtualIP1 := util.GetStringIP(vip1.Status.V4ip, vip1.Status.V6ip) - virtualIP2 := util.GetStringIP(vip2.Status.V4ip, vip2.Status.V6ip) - framework.ExpectNotEqual(virtualIP1, virtualIP2) - framework.ExpectNotEqual(vip1.Status.Mac, vip2.Status.Mac) - - annotations := map[string]string{util.AAPsAnnotation: vip1Name} - cmd := []string{"sh", "-c", "sleep infinity"} - ginkgo.By("Creating pod1 supporting allowed address pair using " + vip1Name) - aapPod1 := framework.MakePrivilegedPod(namespaceName, aapPodName1, nil, annotations, f.KubeOVNImage, cmd, nil) - aapPod1 = podClient.CreateSync(aapPod1) - ginkgo.By("Creating pod2 supporting allowed address pair using " + vip1Name) - aapPod2 := framework.MakePrivilegedPod(namespaceName, aapPodName2, nil, annotations, f.KubeOVNImage, cmd, nil) - _ = podClient.CreateSync(aapPod2) - // a logical switch port with type virtual should be created - conditions := fmt.Sprintf("type=virtual name=%s options:virtual-ip=%q", vip1Name, virtualIP1) - nbctlCmd := "ovn-nbctl --format=list --data=bare --no-heading --columns=options find logical-switch-port " + conditions - output, _, err := framework.NBExec(nbctlCmd) - framework.ExpectNoError(err) - framework.ExpectNotEmpty(strings.TrimSpace(string(output))) - // virtual parents should be set correctly - pairs := strings.Split(string(output), " ") - options := make(map[string]string) - for _, pair := range pairs { - keyValue := strings.Split(pair, "=") - if len(keyValue) == 2 { - options[keyValue[0]] = strings.ReplaceAll(keyValue[1], "\n", "") - } - } - virtualParents := strings.Split(options["virtual-parents"], ",") - sort.Strings(virtualParents) - expectVirtualParents := []string{fmt.Sprintf("%s.%s", aapPodName1, namespaceName), fmt.Sprintf("%s.%s", aapPodName2, namespaceName)} - sort.Strings(expectVirtualParents) - framework.ExpectEqual(expectVirtualParents, virtualParents) - - ginkgo.By("Test allowed address pair connectivity") - if f.HasIPv4() { - ginkgo.By("Test pod ping allowed address pair " + vip1.Status.V4ip) - testConnectivity(vip1.Status.V4ip, namespaceName, aapPodName2, aapPodName1, f) - } - if f.HasIPv6() { - ginkgo.By("Test pod ping allowed address 
pair " + vip1.Status.V6ip) - testConnectivity(vip1.Status.V6ip, namespaceName, aapPodName2, aapPodName1, f) - } - - ginkgo.By("3. Test vip with security group") - ginkgo.By("Creating security group " + securityGroupName) - gatewayV4, gatewayV6 := util.SplitStringIP(aapPod1.Annotations[util.GatewayAnnotation]) - allowAddressV4, allowAddressV6 := util.SplitStringIP(aapPod1.Annotations[util.IPAddressAnnotation]) - rules := make([]*apiv1.SgRule, 0) - if f.HasIPv4() { - // gateway should be added for pinger - rules = append(rules, &apiv1.SgRule{ - IPVersion: "ipv4", - Protocol: apiv1.ProtocolALL, - Priority: 1, - RemoteType: apiv1.SgRemoteTypeAddress, - RemoteAddress: gatewayV4, - Policy: apiv1.PolicyAllow, - }) - // aapPod1 should be allowed by aapPod3 for security group allow address pair test - rules = append(rules, &apiv1.SgRule{ - IPVersion: "ipv4", - Protocol: apiv1.ProtocolALL, - Priority: 1, - RemoteType: apiv1.SgRemoteTypeAddress, - RemoteAddress: allowAddressV4, - Policy: apiv1.PolicyAllow, - }) - } - if f.HasIPv6() { - // gateway should be added for pinger - rules = append(rules, &apiv1.SgRule{ - IPVersion: "ipv6", - Protocol: apiv1.ProtocolALL, - Priority: 1, - RemoteType: apiv1.SgRemoteTypeAddress, - RemoteAddress: gatewayV6, - Policy: apiv1.PolicyAllow, - }) - // aapPod1 should be allowed by aapPod3 for security group allow address pair test - rules = append(rules, &apiv1.SgRule{ - IPVersion: "ipv6", - Protocol: apiv1.ProtocolALL, - Priority: 1, - RemoteType: apiv1.SgRemoteTypeAddress, - RemoteAddress: allowAddressV6, - Policy: apiv1.PolicyAllow, - }) - } - sg := makeSecurityGroup(securityGroupName, true, rules, rules) - _ = securityGroupClient.CreateSync(sg) - - ginkgo.By("Creating pod3 support allowed address pair with security group") - annotations[util.PortSecurityAnnotation] = "true" - annotations[fmt.Sprintf(util.SecurityGroupAnnotationTemplate, "ovn")] = securityGroupName - aapPod3 := framework.MakePod(namespaceName, aapPodName3, nil, annotations, f.KubeOVNImage, cmd, nil) - aapPod3 = podClient.CreateSync(aapPod3) - v4ip, v6ip := util.SplitStringIP(aapPod3.Annotations[util.IPAddressAnnotation]) - if f.HasIPv4() { - ginkgo.By("Test allow address pair with security group for ipv4") - testVipWithSG(v4ip, namespaceName, aapPodName1, aapPodName2, aapPodName3, securityGroupName, f) - } - if f.HasIPv6() { - ginkgo.By("Test allow address pair with security group for ipv6") - testVipWithSG(v6ip, namespaceName, aapPodName1, aapPodName2, aapPodName3, securityGroupName, f) - } - - ginkgo.By("3. Test switch lb vip") - ginkgo.By("Creating two arp proxy vips, should have the same mac which is from gw subnet mac") - ginkgo.By("Creating arp proxy switch lb vip " + switchLbVip1Name) - switchLbVip1 := makeOvnVip(namespaceName, switchLbVip1Name, subnetName, "", "", util.SwitchLBRuleVip) - switchLbVip1 = vipClient.CreateSync(switchLbVip1) - ginkgo.By("Creating arp proxy switch lb vip " + switchLbVip2Name) - switchLbVip2 := makeOvnVip(namespaceName, switchLbVip2Name, subnetName, "", "", util.SwitchLBRuleVip) - switchLbVip2 = vipClient.CreateSync(switchLbVip2) - // arp proxy vip only used in switch lb rule, the lb vip use the subnet gw mac to use lb nat flow - framework.ExpectEqual(switchLbVip1.Status.Mac, switchLbVip2.Status.Mac) - if vip1.Status.V4ip != "" { - framework.ExpectNotEqual(vip1.Status.V4ip, vip2.Status.V4ip) - } else { - framework.ExpectNotEqual(vip1.Status.V6ip, vip2.Status.V6ip) - } - }) -}) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - // Register flags. 
- config.CopyFlags(config.Flags, flag.CommandLine) - k8sframework.RegisterCommonFlags(flag.CommandLine) - k8sframework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - k8sframework.AfterReadingAllFlags(&k8sframework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/webhook/e2e_test.go b/test/e2e/webhook/e2e_test.go deleted file mode 100644 index 5fa0ec1b93f..00000000000 --- a/test/e2e/webhook/e2e_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package webhook - -import ( - "flag" - "testing" - - "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/config" - - "github.com/onsi/ginkgo/v2" - - // Import tests. - _ "github.com/kubeovn/kube-ovn/test/e2e/webhook/pod" - _ "github.com/kubeovn/kube-ovn/test/e2e/webhook/subnet" - _ "github.com/kubeovn/kube-ovn/test/e2e/webhook/vip" -) - -func init() { - klog.SetOutput(ginkgo.GinkgoWriter) - - // Register flags. - config.CopyFlags(config.Flags, flag.CommandLine) - framework.RegisterCommonFlags(flag.CommandLine) - framework.RegisterClusterFlags(flag.CommandLine) -} - -func TestE2E(t *testing.T) { - framework.AfterReadingAllFlags(&framework.TestContext) - e2e.RunE2ETests(t) -} diff --git a/test/e2e/webhook/pod/pod.go b/test/e2e/webhook/pod/pod.go deleted file mode 100644 index 5791f9461e2..00000000000 --- a/test/e2e/webhook/pod/pod.go +++ /dev/null @@ -1,117 +0,0 @@ -package pod - -import ( - "context" - "fmt" - "math/big" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/onsi/ginkgo/v2" - - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:webhook-pod]", func() { - f := framework.NewDefaultFramework("webhook-pod") - - var podClient *framework.PodClient - var subnetClient *framework.SubnetClient - var namespaceName, subnetName, podName string - var cidr, conflictName, firstIPv4, lastIPv4 string - - ginkgo.BeforeEach(func() { - podClient = f.PodClient() - subnetClient = f.SubnetClient() - namespaceName = f.Namespace.Name - subnetName = "subnet-" + framework.RandomSuffix() - podName = "pod-" + framework.RandomSuffix() - conflictName = podName + "-conflict" - cidr = framework.RandomCIDR(f.ClusterIPFamily) - cidrV4, _ := util.SplitStringIP(cidr) - if cidrV4 == "" { - firstIPv4 = "" - lastIPv4 = "" - } else { - firstIPv4, _ = util.FirstIP(cidrV4) - lastIPv4, _ = util.LastIP(cidrV4) - } - - ginkgo.By("Creating subnet " + subnetName) - subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - _ = subnetClient.CreateSync(subnet) - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting pod " + podName) - podClient.DeleteSync(podName) - - ginkgo.By("Deleting pod " + conflictName) - podClient.DeleteSync(conflictName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("validate static ip by pod annotation", func() { - ginkgo.By("Creating pod " + podName) - cmd := []string{"sh", "-c", "sleep infinity"} - - ginkgo.By("validate ip address format") - annotations := map[string]string{ - util.IPAddressAnnotation: "10.10.10.10.10", - } - pod := framework.MakePod(namespaceName, podName, nil, annotations, f.KubeOVNImage, cmd, nil) - _, err := podClient.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) - framework.ExpectError(err, "ip %s is not a valid %s", annotations[util.IPAddressAnnotation], util.IPAddressAnnotation) - - ginkgo.By("validate pod 
ip not in subnet cidr") - staticIP := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(lastIPv4), big.NewInt(10))) - annotations = map[string]string{ - util.CidrAnnotation: cidr, - util.IPAddressAnnotation: staticIP, - } - framework.Logf("validate ip not in subnet range, cidr %s, static ip %s", cidr, staticIP) - pod.Annotations = annotations - - _, err = podClient.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) - framework.ExpectError(err, "%s not in cidr %s", staticIP, cidr) - - ginkgo.By("validate pod ippool not in subnet cidr") - startIP := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(lastIPv4), big.NewInt(10))) - endIP := util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(lastIPv4), big.NewInt(20))) - ipPool := startIP + "," + endIP - annotations = map[string]string{ - util.CidrAnnotation: cidr, - util.IPPoolAnnotation: ipPool, - } - framework.Logf("validate ippool not in subnet range, cidr %s, ippool %s", cidr, ipPool) - pod.Annotations = annotations - _, err = podClient.PodInterface.Create(context.TODO(), pod, metav1.CreateOptions{}) - framework.ExpectError(err, "%s not in cidr %s", ipPool, cidr) - - ginkgo.By("validate pod static ip success") - staticIP = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(firstIPv4), big.NewInt(10))) - annotations = map[string]string{ - util.LogicalSwitchAnnotation: subnetName, - util.CidrAnnotation: cidr, - util.IPAddressAnnotation: staticIP, - } - pod.Annotations = annotations - _ = podClient.CreateSync(pod) - ipCR := podName + "." + namespaceName - - framework.WaitUntil(2*time.Second, time.Minute, func(ctx context.Context) (bool, error) { - checkPod, err := podClient.PodInterface.Get(ctx, podName, metav1.GetOptions{}) - framework.ExpectNoError(err) - return checkPod.Annotations[util.RoutedAnnotation] == "true", nil - }, fmt.Sprintf("pod's annotation %s is true", util.RoutedAnnotation)) - - ginkgo.By("validate pod ip conflict") - framework.Logf("validate ip conflict, pod %s, ip cr %s, conflict pod %s", podName, ipCR, conflictName) - conflictPod := framework.MakePod(namespaceName, conflictName, nil, annotations, f.KubeOVNImage, cmd, nil) - _, err = podClient.PodInterface.Create(context.TODO(), conflictPod, metav1.CreateOptions{}) - framework.ExpectError(err, "annotation static-ip %s conflicts with ip crd %s, ip %s", staticIP, ipCR, staticIP) - }) -}) diff --git a/test/e2e/webhook/subnet/subnet.go b/test/e2e/webhook/subnet/subnet.go deleted file mode 100644 index e56dcc70032..00000000000 --- a/test/e2e/webhook/subnet/subnet.go +++ /dev/null @@ -1,93 +0,0 @@ -package subnet - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/onsi/ginkgo/v2" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:webhook-subnet]", func() { - f := framework.NewDefaultFramework("webhook-subnet") - - var subnetName, cidr, cidrV4, cidrV6, firstIPv4, firstIPv6 string - var gateways []string - var subnetClient *framework.SubnetClient - - ginkgo.BeforeEach(func() { - subnetClient = f.SubnetClient() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - cidrV4, cidrV6 = util.SplitStringIP(cidr) - gateways = nil - - if cidrV4 == "" { - firstIPv4 = "" - } else { - firstIPv4, _ = util.FirstIP(cidrV4) - gateways = append(gateways, firstIPv4) - } - if cidrV6 == "" { - firstIPv6 = "" - } else { - firstIPv6, _ = util.FirstIP(cidrV6) - gateways = 
append(gateways, firstIPv6) - } - }) - ginkgo.AfterEach(func() { - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("check subnet creation with different errors", func() { - ginkgo.By("Creating subnet " + subnetName) - subnet := framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, nil) - - ginkgo.By("Validating subnet gateway") - subnet.Spec.Gateway = "100.16.0.1" - _, err := subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "gateway %s is not in cidr %s", subnet.Spec.Gateway, subnet.Spec.CIDRBlock) - - ginkgo.By("Validating subnet cidr conflict with known addresses") - subnet.Spec.Gateway = "" - subnet.Spec.CIDRBlock = util.IPv4Loopback - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "%s conflicts with v4 loopback cidr %s", subnet.Spec.CIDRBlock, util.IPv4Loopback) - - ginkgo.By("Validating subnet excludeIPs") - subnet.Spec.CIDRBlock = cidr - ipr := "10.1.1.11..10.1.1.30..10.1.1.50" - subnet.Spec.ExcludeIps = []string{ipr} - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "%s in excludeIps is not a valid ip range", ipr) - - ginkgo.By("Validating subnet gateway type") - subnet.Spec.ExcludeIps = []string{} - subnet.Spec.GatewayType = "test" - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "%s is not a valid gateway type", subnet.Spec.GatewayType) - - ginkgo.By("Validating subnet protocol") - subnet.Spec.GatewayType = apiv1.GWDistributedType - subnet.Spec.Protocol = "test" - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "%s is not a valid protocol", subnet.Spec.Protocol) - - ginkgo.By("Validating subnet allowSubnets") - subnet.Spec.Protocol = "" - subnet.Spec.AllowSubnets = []string{"10.1.1.302/24"} - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "%s in allowSubnets is not a valid address", subnet.Spec.AllowSubnets[0]) - - ginkgo.By("Validating subnet cidr") - subnet.Spec.AllowSubnets = []string{} - subnet.Spec.CIDRBlock = "10.1.1.32/24," - _, err = subnetClient.SubnetInterface.Create(context.TODO(), subnet, metav1.CreateOptions{}) - framework.ExpectError(err, "subnet %s cidr %s is invalid", subnet.Name, subnet.Spec.CIDRBlock) - }) -}) diff --git a/test/e2e/webhook/vip/vip.go b/test/e2e/webhook/vip/vip.go deleted file mode 100644 index d8c89270cc0..00000000000 --- a/test/e2e/webhook/vip/vip.go +++ /dev/null @@ -1,86 +0,0 @@ -package vip - -import ( - "context" - "math/big" - - "github.com/onsi/ginkgo/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - apiv1 "github.com/kubeovn/kube-ovn/pkg/apis/kubeovn/v1" - "github.com/kubeovn/kube-ovn/pkg/util" - "github.com/kubeovn/kube-ovn/test/e2e/framework" -) - -var _ = framework.Describe("[group:webhook-vip]", func() { - f := framework.NewDefaultFramework("webhook-vip") - - var vip *apiv1.Vip - var subnet *apiv1.Subnet - var vipClient *framework.VipClient - var subnetClient *framework.SubnetClient - var vipName, subnetName, namespaceName string - var cidr, lastIPv4 string - - ginkgo.BeforeEach(func() { - subnetClient = f.SubnetClient() - subnetName = "subnet-" + framework.RandomSuffix() - cidr = framework.RandomCIDR(f.ClusterIPFamily) - 
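// framework.RandomCIDR returns a comma-joined "v4,v6" CIDR string for dual-stack families; util.SplitStringIP below splits it into per-family parts, and only the IPv4 part is needed here -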
cidrV4, _ := util.SplitStringIP(cidr) - if cidrV4 == "" { - lastIPv4 = "" - } else { - lastIPv4, _ = util.LastIP(cidrV4) - } - - vipClient = f.VipClient() - vipName = "vip-" + framework.RandomSuffix() - namespaceName = f.Namespace.Name - - ginkgo.By("Creating subnet " + subnetName) - subnet = framework.MakeSubnet(subnetName, "", cidr, "", "", "", nil, nil, []string{namespaceName}) - subnet = subnetClient.CreateSync(subnet) - }) - - ginkgo.AfterEach(func() { - ginkgo.By("Deleting vip " + vipName) - vipClient.Delete(vipName) - - ginkgo.By("Deleting subnet " + subnetName) - subnetClient.DeleteSync(subnetName) - }) - - framework.ConformanceIt("check vip creation with different errors", func() { - ginkgo.By("Creating vip " + vipName) - vip = framework.MakeVip(namespaceName, vipName, "", "", "", "") - - ginkgo.By("Validating empty subnet") - vip.Spec.Subnet = "" - _, err := vipClient.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{}) - framework.ExpectError(err, "subnet parameter cannot be empty") - - ginkgo.By("Validating non-existent subnet") - vip.Spec.Subnet = "abc" - _, err = vipClient.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{}) - framework.ExpectError(err, `Subnet.kubeovn.io "%s" not found`, vip.Spec.Subnet) - - ginkgo.By("Validating vip with an invalid v4ip") - vip.Spec.Subnet = subnetName - vip.Spec.V4ip = "10.10.10.10.10" - _, err = vipClient.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{}) - framework.ExpectError(err, "%s is not a valid ip", vip.Spec.V4ip) - - ginkgo.By("Validating vip with an invalid v6ip") - vip.Spec.V4ip = "" - vip.Spec.V6ip = "2001:250:207::eff2::2" - _, err = vipClient.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{}) - framework.ExpectError(err, "%s is not a valid ip", vip.Spec.V6ip) - - ginkgo.By("Validating vip with an ip outside the subnet cidr") - vip.Spec.V6ip = "" - vip.Spec.V4ip = util.BigInt2Ip(big.NewInt(0).Add(util.IP2BigInt(lastIPv4), big.NewInt(10))) - _, err = vipClient.VipInterface.Create(context.TODO(), vip, metav1.CreateOptions{}) - framework.ExpectError(err, "%s is not in the range of subnet %s", vip.Spec.V4ip, vip.Spec.Subnet) - }) -})
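Several of the removed webhook cases manufacture an address just past the end of a random subnet CIDR (util.LastIP plus a fixed offset) to provoke the "not in cidr" rejection, a different validation path from a merely malformed literal. A minimal standalone sketch of that trick using only Go's net/netip; lastIP is an illustrative helper, not a kube-ovn utility:

package main

import (
	"fmt"
	"net/netip"
)

// lastIP returns the highest address in an IPv4 prefix,
// analogous to util.LastIP in the deleted tests.
func lastIP(p netip.Prefix) netip.Addr {
	a := p.Masked().Addr().As4()
	for i := p.Bits(); i < 32; i++ {
		a[i/8] |= 1 << (7 - i%8) // set every host bit
	}
	return netip.AddrFrom4(a)
}

func main() {
	// A fixed CIDR stands in for framework.RandomCIDR.
	cidr := netip.MustParsePrefix("10.166.0.0/16")

	// Step 10 addresses past the end of the subnet, like LastIP + 10.
	ip := lastIP(cidr)
	for i := 0; i < 10; i++ {
		ip = ip.Next()
	}
	fmt.Printf("%s contains %s: %v\n", cidr, ip, cidr.Contains(ip)) // false

	// The malformed literal used by the v4ip case fails to parse at all.
	if _, err := netip.ParseAddr("10.10.10.10.10"); err != nil {
		fmt.Println("rejected:", err)
	}
}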