diff --git a/Makefile b/Makefile index e4093d9b..f8ba0161 100644 --- a/Makefile +++ b/Makefile @@ -78,7 +78,11 @@ ENVTEST_ASSETS_DIR=$(shell pwd)/testbin test: manifests generate check ## Run tests. mkdir -p ${ENVTEST_ASSETS_DIR} test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh - source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out + source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./pkg/... -coverprofile cover.out + +##@ e2e +e2e: $(GOBIN)/ginkgo $(GOBIN)/kind helm + PATH="${GOBIN}:${PATH}" ./hack/e2e.sh ##@ Build @@ -122,7 +126,10 @@ tools: $(GOBIN)/goimports \ $(GOBIN)/gofumpt \ $(GOBIN)/golangci-lint \ $(GOBIN)/controller-gen \ - $(GOBIN)/kustomize + $(GOBIN)/kustomize \ + $(GOBIN)/ginkgo \ + $(GOBIN)/kind \ + helm $(GOBIN)/goimports: $(call go-get-tool,$(GOBIN)/goimports,golang.org/x/tools/cmd/goimports) @@ -148,6 +155,18 @@ $(GOBIN)/controller-gen: $(GOBIN)/kustomize: $(call go-get-tool,$(GOBIN)/kustomize,sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) +$(GOBIN)/ginkgo: + $(call go-get-tool,$(GOBIN)/ginkgo,github.com/onsi/ginkgo/ginkgo@v1.16.2) + +$(GOBIN)/kind: + $(call go-get-tool,$(GOBIN)/kind,sigs.k8s.io/kind@v0.10.0) + +helm: + @[ -f /usr/local/bin/helm ] || { \ set -e ;\ curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash ;\ } + # go-get-tool will 'go get' any package $2 and install it to $1. define go-get-tool @[ -f $(1) ] || { \ diff --git a/go.mod b/go.mod index 17bce0ef..a75bed31 100644 --- a/go.mod +++ b/go.mod @@ -7,6 +7,7 @@ require ( github.com/gin-gonic/gin v1.6.3 github.com/go-logr/logr v0.3.0 github.com/google/go-cmp v0.5.2 + github.com/onsi/ginkgo v1.14.1 github.com/onsi/gomega v1.10.2 github.com/openkruise/kruise-api v0.8.0 github.com/pkg/errors v0.9.1 diff --git a/go.sum b/go.sum index b7c76714..ae06bc27 100644 --- a/go.sum +++ b/go.sum @@ -146,11 +146,13 @@ github.com/docker/docker v1.4.2-0.20200309214505-aa6a9891b09c/go.mod h1:eEKB0N0r github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= @@ -833,6 +835,7 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2 h1:orlkJ3myw8CN1nVQHBFfloD+L3egixIa4FvUP6RosSA= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -891,6 +894,7 @@ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H k8s.io/kube-proxy v0.19.2/go.mod h1:/07XChnL0EkYAQyZ7noQuyPYX5QOUBQECa9dsm9ScyY= k8s.io/kube-scheduler v0.19.2 h1:xV2Yj76g62n4+g6IR7/8Nba5VfCI53CCrD6qnQ4yaH0= k8s.io/kube-scheduler v0.19.2/go.mod h1:Mh/QNfmP0eqt7JtNUyIAsGhU2zO4j1EWel8TFizxZKo= +k8s.io/kubectl v0.19.2 h1:/Dxz9u7S0GnchLA6Avqi5k1qhZH4Fusgecj8dHsSnbk= k8s.io/kubectl v0.19.2/go.mod h1:4ib3oj5ma6gF95QukTvC7ZBMxp60+UEAhDPjLuBIrV4= k8s.io/kubelet v0.19.2/go.mod h1:FHHoByVWzh6kNaarXaDPAa751Oz6REcOVRyFT84L1Is= k8s.io/kubernetes v1.19.2 h1:sEvBYVM1/H5hqejFR10u8ndreYARV3DiTrqi2AY31ok= diff --git a/hack/e2e.sh b/hack/e2e.sh new file mode 100755 index 00000000..73126e69 --- /dev/null +++ b/hack/e2e.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash + +# Copyright 2021 Vesoft Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." 
&& pwd) + +INSTALL_KUBERNETES=${INSTALL_KUBERNETES:-true} +UNINSTALL_KUBERNETES=${UNINSTALL_KUBERNETES:-true} +INSTALL_CERT_MANAGER=${INSTALL_CERT_MANAGER:-true} +INSTALL_CERT_MANAGER_VERSION=${INSTALL_CERT_MANAGER_VERSION:-v1.3.1} +INSTALL_KRUISE=${INSTALL_KRUISE:-true} +INSTALL_KRUISE_VERSION=${INSTALL_KRUISE_VERSION:-v0.8.1} +INSTALL_NEBULA_OPERATOR=${INSTALL_NEBULA_OPERATOR:-true} +KIND_NAME=${KIND_NAME:-e2e-test} +KIND_CONFIG=${KIND_CONFIG:-${ROOT}/hack/kind-config.yaml} +STORAGE_CLASS=${STORAGE_CLASS:-} +NEBULA_VERSION=${NEBULA_VERSION:-v2-nightly} + +if [[ "${INSTALL_KUBERNETES}" == "true" ]];then + KUBECONFIG=~/.kube/${KIND_NAME}.kind.config +else + KUBECONFIG=${KUBECONFIG:-~/.kube/config} +fi +DELETE_NAMESPACE=${DELETE_NAMESPACE:-true} +DELETE_NAMESPACE_ON_FAILURE=${DELETE_NAMESPACE_ON_FAILURE:-false} + +echo "starting e2e tests" +echo "INSTALL_KUBERNETES: ${INSTALL_KUBERNETES}" +echo "UNINSTALL_KUBERNETES: ${UNINSTALL_KUBERNETES}" +echo "INSTALL_CERT_MANAGER: ${INSTALL_CERT_MANAGER}" +echo "INSTALL_CERT_MANAGER_VERSION: ${INSTALL_CERT_MANAGER_VERSION}" +echo "INSTALL_KRUISE: ${INSTALL_KRUISE}" +echo "INSTALL_KRUISE_VERSION: ${INSTALL_KRUISE_VERSION}" +echo "INSTALL_NEBULA_OPERATOR: ${INSTALL_NEBULA_OPERATOR}" +echo "KIND_NAME: ${KIND_NAME}" +echo "KIND_CONFIG: ${KIND_CONFIG}" +echo "STORAGE_CLASS: ${STORAGE_CLASS}" +echo "NEBULA_VERSION: ${NEBULA_VERSION}" + +echo "KUBECONFIG: ${KUBECONFIG}" +echo "DELETE_NAMESPACE: ${DELETE_NAMESPACE}" +echo "INSTALL_KUBERNETES: ${INSTALL_KUBERNETES}" +echo "DELETE_NAMESPACE_ON_FAILURE: ${DELETE_NAMESPACE_ON_FAILURE}" + +ginkgo ./tests/e2e \ + -- \ + --install-kubernetes="${INSTALL_KUBERNETES}" \ + --uninstall-kubernetes="${UNINSTALL_KUBERNETES}" \ + --install-cert-manager="${INSTALL_CERT_MANAGER}" \ + --install-cert-manager-version="${INSTALL_CERT_MANAGER_VERSION}" \ + --install-kruise="${INSTALL_KRUISE}" \ + --install-kruise-version="${INSTALL_KRUISE_VERSION}" \ + --install-nebula-operator="${INSTALL_NEBULA_OPERATOR}" \ + --kind-name="${KIND_NAME}" \ + --kind-config="${KIND_CONFIG}" \ + --storage-class="${STORAGE_CLASS}" \ + --nebula-version="${NEBULA_VERSION}" \ + --kubeconfig="${KUBECONFIG}" \ + --delete-namespace="${DELETE_NAMESPACE}" \ + --delete-namespace-on-failure="${DELETE_NAMESPACE_ON_FAILURE}" \ + "${@}" diff --git a/hack/kind-config.yaml b/hack/kind-config.yaml new file mode 100644 index 00000000..ccb61222 --- /dev/null +++ b/hack/kind-config.yaml @@ -0,0 +1,22 @@ +# Copyright 2021 Vesoft Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: worker +- role: worker +- role: worker +- role: worker +- role: worker \ No newline at end of file diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index e9074ede..843e3544 100644 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -1,5 +1,19 @@ #!/usr/bin/env bash +# Copyright 2021 Vesoft Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + set -o errexit set -o nounset set -o pipefail diff --git a/hack/version.sh b/hack/version.sh old mode 100644 new mode 100755 diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go new file mode 100644 index 00000000..aade4d70 --- /dev/null +++ b/tests/e2e/config/config.go @@ -0,0 +1,69 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "flag" +) + +const ( + DefaultKindName = "e2e-test" + DefaultInstallCertManagerVersion = "v1.3.1" + DefaultInstallKruiseVersion = "v0.8.1" +) + +var TestConfig Config + +type Config struct { + InstallKubernetes bool + UninstallKubernetes bool + InstallCertManager bool + InstallCertManagerVersion string + InstallKruise bool + InstallKruiseVersion string + InstallNebulaOperator bool + KindName string + KindConfig string + StorageClass string + NebulaVersion string +} + +func RegisterClusterFlags(flags *flag.FlagSet) { + flags.BoolVar(&TestConfig.InstallKubernetes, "install-kubernetes", true, + "If true tests will install kubernetes.") + flags.BoolVar(&TestConfig.UninstallKubernetes, "uninstall-kubernetes", true, + "If true tests will uninstall kubernetes. 
Ignored when --install-kubernetes is false.") + flags.BoolVar(&TestConfig.InstallCertManager, "install-cert-manager", true, + "If true tests will install cert-manager.") + flags.StringVar(&TestConfig.InstallCertManagerVersion, "install-cert-manager-version", DefaultInstallCertManagerVersion, + "The cert-manager version to install.") + flags.BoolVar(&TestConfig.InstallKruise, "install-kruise", true, + "If true tests will install kruise.") + flags.StringVar(&TestConfig.InstallKruiseVersion, "install-kruise-version", DefaultInstallKruiseVersion, + "The kruise version to install.") + flags.BoolVar(&TestConfig.InstallNebulaOperator, "install-nebula-operator", true, + "If true tests will install nebula operator.") + flags.StringVar(&TestConfig.KindName, "kind-name", DefaultKindName, + "The kind name to install.") + flags.StringVar(&TestConfig.KindConfig, "kind-config", "../../hack/kind-config.yaml", + "The kind config to install.") + flags.StringVar(&TestConfig.StorageClass, "storage-class", "", + "The storage class to use to install nebula cluster. "+ + "If not set, the default storage class is used, falling back to any other storage class in the cluster.") + flags.StringVar(&TestConfig.NebulaVersion, "nebula-version", "v2-nightly", + "The nebula version.") +} diff --git a/tests/e2e/e2e.go b/tests/e2e/e2e.go new file mode 100644 index 00000000..c2542960 --- /dev/null +++ b/tests/e2e/e2e.go @@ -0,0 +1,42 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "testing" + + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" +) + +var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { + setupSuite() + return nil +}, func(_ []byte) { + setupSuitePerGinkgoNode() +}) + +var _ = ginkgo.SynchronizedAfterSuite(func() { + cleanupSuitePerGinkgoNode() +}, func() { + cleanupSuite() +}) + +func RunE2ETests(t *testing.T) { + gomega.RegisterFailHandler(ginkgo.Fail) + ginkgo.RunSpecs(t, "nebula-operator e2e suite") +} diff --git a/tests/e2e/e2e_test.go b/tests/e2e/e2e_test.go new file mode 100644 index 00000000..09e5ba5f --- /dev/null +++ b/tests/e2e/e2e_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package e2e + +import ( + "flag" + "math/rand" + "os" + "testing" + "time" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/framework/config" + + e2econfig "github.com/vesoft-inc/nebula-operator/tests/e2e/config" + _ "github.com/vesoft-inc/nebula-operator/tests/e2e/nebulacluster" +) + +func handleFlags() { + config.CopyFlags(config.Flags, flag.CommandLine) + framework.RegisterCommonFlags(flag.CommandLine) + framework.RegisterClusterFlags(flag.CommandLine) + e2econfig.RegisterClusterFlags(flag.CommandLine) + flag.Parse() +} + +func TestMain(m *testing.M) { + handleFlags() + + flag.CommandLine.VisitAll(func(flag *flag.Flag) { + framework.Logf("FLAG: --%s=%q", flag.Name, flag.Value) + }) + + framework.AfterReadingAllFlags(&framework.TestContext) + + rand.Seed(time.Now().UnixNano()) + os.Exit(m.Run()) +} + +func TestE2E(t *testing.T) { + RunE2ETests(t) +} diff --git a/tests/e2e/framework/framework.go b/tests/e2e/framework/framework.go new file mode 100644 index 00000000..7215c268 --- /dev/null +++ b/tests/e2e/framework/framework.go @@ -0,0 +1,48 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "github.com/onsi/ginkgo" + "k8s.io/kubernetes/test/e2e/framework" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Framework struct { + *framework.Framework + + RuntimeClient client.Client +} + +func NewDefaultFramework(baseName string) *Framework { + bf := framework.NewDefaultFramework(baseName) + f := &Framework{ + Framework: bf, + } + var runtimeClient client.Client + + ginkgo.BeforeEach(func() { + var err error + clientConfig := f.ClientConfig() + + runtimeClient, err = client.New(clientConfig, client.Options{}) + framework.ExpectNoError(err) + f.RuntimeClient = runtimeClient + }) + + return f +} diff --git a/tests/e2e/nebulacluster/nebulacluster.go b/tests/e2e/nebulacluster/nebulacluster.go new file mode 100644 index 00000000..f4396669 --- /dev/null +++ b/tests/e2e/nebulacluster/nebulacluster.go @@ -0,0 +1,229 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package nebulacluster + +import ( + "context" + "fmt" + "time" + + "github.com/onsi/ginkgo" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" + "github.com/vesoft-inc/nebula-operator/pkg/label" + e2eframework "github.com/vesoft-inc/nebula-operator/tests/e2e/framework" +) + +var _ = ginkgo.Describe("NebulaCluster", func() { + f := e2eframework.NewDefaultFramework("nebulacluster") + + var ns string + var clientConfig *rest.Config + var runtimeClient client.Client + + ginkgo.BeforeEach(func() { + ns = f.Namespace.Name + clientConfig = f.ClientConfig() + runtimeClient = f.RuntimeClient + }) + + ginkgo.Describe("difference spec", func() { + kruiseReference := v1alpha1.WorkloadReference{ + Name: "statefulsets.apps.kruise.io", + Version: "v1alpha1", + } + nebulaSchedulerName := "nebula-scheduler" + + testCases := []struct { + Name string + GraphdReplicas int32 + MetadReplicas int32 + StoragedReplicas int32 + Reference *v1alpha1.WorkloadReference + SchedulerName string + }{ + { + Name: "test1-1-3", + GraphdReplicas: 1, + MetadReplicas: 1, + StoragedReplicas: 3, + }, { + Name: "test1-1-3", + GraphdReplicas: 1, + MetadReplicas: 1, + StoragedReplicas: 3, + Reference: &kruiseReference, + SchedulerName: nebulaSchedulerName, + }, { + Name: "test1-3-3", + GraphdReplicas: 1, + MetadReplicas: 3, + StoragedReplicas: 3, + SchedulerName: nebulaSchedulerName, + }, { + Name: "test1-3-3", + GraphdReplicas: 1, + MetadReplicas: 3, + StoragedReplicas: 3, + Reference: &kruiseReference, + }, { + Name: "test2-3-4", + GraphdReplicas: 2, + MetadReplicas: 3, + StoragedReplicas: 4, + SchedulerName: nebulaSchedulerName, + }, + } + + for i, tc := range testCases { + tc := tc + ginkgo.Context(fmt.Sprintf("%d: %#v", i, tc), func() { + ginkgo.It("should deploy, scale out, and scale in successfully", func() { + ginkgo.By("Deploy NebulaCluster") + + var err error + // init the NebulaCluster Resource for testing + nc := getNebulaCluster(runtimeClient, ns, tc.Name) + nc.Spec.Graphd.Replicas = pointer.Int32Ptr(tc.GraphdReplicas) + nc.Spec.Metad.Replicas = pointer.Int32Ptr(tc.MetadReplicas) + nc.Spec.Storaged.Replicas = pointer.Int32Ptr(tc.StoragedReplicas) + if tc.Reference != nil { + nc.Spec.Reference = *tc.Reference + } + if tc.SchedulerName != "" { + nc.Spec.SchedulerName = tc.SchedulerName + } + + err = runtimeClient.Create(context.TODO(), nc) + framework.ExpectNoError(err, "failed to create NebulaCluster %s/%s", ns, nc.Name) + + ginkgo.By("Wait for NebulaCluster ready") + + err = waitForNebulaClusterReady(nc, 30*time.Minute, 30*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for NebulaCluster %s/%s ready", ns, nc.Name) + + ginkgo.By("Create port forward for NebulaCluster") + + graphLocalAddress := "127.0.0.1" + var graphLocalPort int + var stopCh chan<- struct{} + graphLocalPort, stopCh, err = portForwardNebulaClusterGraphd(nc, graphLocalAddress, clientConfig) + framework.ExpectNoError(err, "failed to port forward for graphd of NebulaCluster %s/%s", ns, nc.Name) + defer close(stopCh) + framework.Logf("create port forward %s:%d for graphd of NebulaCluster %s/%s", graphLocalAddress, graphLocalPort, ns, nc.Name) + + ginkgo.By("Init space and insert samples for NebulaCluster") + replicaFactor := 3 + if tc.StoragedReplicas < 3 { + replicaFactor = 1 + } + 
executeSchema := fmt.Sprintf( + "CREATE SPACE IF NOT EXISTS e2e_test(partition_num=%d,replica_factor=%d);", + 5*tc.StoragedReplicas, replicaFactor) + + "USE e2e_test;" + + "CREATE TAG IF NOT EXISTS person(name string, age int);" + + "CREATE EDGE IF NOT EXISTS like(likeness double);" + err = waitForExecuteNebulaSchema(30*time.Second, 2*time.Second, graphLocalAddress, graphLocalPort, "user", "pass", executeSchema) + framework.ExpectNoError(err, "failed to init space after deploy for NebulaCluster %s/%s", ns, nc.Name) + time.Sleep(10 * time.Second) + + executeSchema = "USE e2e_test;" + + "INSERT VERTEX person(name, age) VALUES " + + "'Bob':('Bob', 10), " + + "'Lily':('Lily', 9), " + + "'Tom':('Tom', 10), " + + "'Jerry':('Jerry', 13), " + + "'John':('John', 11);" + + "INSERT EDGE like(likeness) VALUES " + + "'Bob'->'Lily':(80.0);" + err = waitForExecuteNebulaSchema(30*time.Second, 2*time.Second, graphLocalAddress, graphLocalPort, "user", "pass", executeSchema) + framework.ExpectNoError(err, "failed to insert samples after deploy for NebulaCluster %s/%s", ns, nc.Name) + + ginkgo.By("Query from NebulaCluster") + time.Sleep(2 * time.Second) + executeSchema = "USE e2e_test;" + + "GO FROM 'Bob' OVER like YIELD $^.person.name, $^.person.age, like.likeness;" + err = waitForExecuteNebulaSchema(30*time.Second, 2*time.Second, graphLocalAddress, graphLocalPort, "user", "pass", executeSchema) + framework.ExpectNoError(err, "failed to query data for NebulaCluster %s/%s", ns, nc.Name) + + ginkgo.By("Delete NebulaCluster") + err = runtimeClient.Delete(context.TODO(), nc) + framework.ExpectNoError(err, "failed to delete NebulaCluster %s/%s", ns, nc.Name) + + ginkgo.By("Wait for NebulaCluster to be deleted") + err = waitForNebulaClusterDeleted(nc, 10*time.Minute, 10*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for NebulaCluster %s/%s to be deleted", ns, nc.Name) + }) + }) + } + }) + + ginkgo.It("Deleted resources controlled by NebulaCluster will be recovered", func() { + ginkgo.By("Deploy NebulaCluster") + var err error + + nc := getNebulaCluster(runtimeClient, ns, "test-recovery") + nc.Spec.Graphd.Replicas = pointer.Int32Ptr(2) + nc.Spec.Metad.Replicas = pointer.Int32Ptr(3) + nc.Spec.Storaged.Replicas = pointer.Int32Ptr(4) + err = runtimeClient.Create(context.TODO(), nc) + framework.ExpectNoError(err, "failed to create NebulaCluster %s/%s", ns, nc.Name) + + ginkgo.By("Wait for NebulaCluster ready") + + err = waitForNebulaClusterReady(nc, 30*time.Minute, 30*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for NebulaCluster %s/%s ready", ns, nc.Name) + + ginkgo.By("Delete StatefulSet/Service") + listOptions := client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set(label.New().Cluster(nc.GetClusterName()))), + Namespace: ns, + } + stsList := appsv1.StatefulSetList{} + err = runtimeClient.List(context.TODO(), &stsList, &listOptions) + framework.ExpectNoError(err, "failed to list StatefulSet with option: %+v", listOptions) + + framework.Logf("found %d StatefulSets in namespace %s", len(stsList.Items), ns) + for i := range stsList.Items { + sts := stsList.Items[i] + err := runtimeClient.Delete(context.TODO(), &sts, &client.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete StatefulSet %s/%s", ns, sts.Name) + framework.Logf("delete StatefulSet %s/%s successfully", ns, sts.Name) + } + + svcList := corev1.ServiceList{} + err = runtimeClient.List(context.TODO(), &svcList, &listOptions) +
framework.ExpectNoError(err, "failed to list Service with option: %+v", listOptions) + for i := range svcList.Items { + svc := svcList.Items[i] + err := runtimeClient.Delete(context.TODO(), &svc, &client.DeleteOptions{}) + framework.ExpectNoError(err, "failed to delete Service %s/%s", ns, svc.Name) + } + + ginkgo.By("Wait for NebulaCluster ready") + + err = waitForNebulaClusterReady(nc, 15*time.Minute, 10*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for NebulaCluster %s/%s ready", ns, nc.Name) + }) +}) diff --git a/tests/e2e/nebulacluster/util.go b/tests/e2e/nebulacluster/util.go new file mode 100644 index 00000000..996d82cd --- /dev/null +++ b/tests/e2e/nebulacluster/util.go @@ -0,0 +1,326 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package nebulacluster + +import ( + "context" + "fmt" + "io" + "os" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/rest" + storageutil "k8s.io/kubernetes/pkg/apis/storage/util" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/client" + + nebula "github.com/vesoft-inc/nebula-go" + "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" + "github.com/vesoft-inc/nebula-operator/pkg/label" + e2econfig "github.com/vesoft-inc/nebula-operator/tests/e2e/config" + e2eutil "github.com/vesoft-inc/nebula-operator/tests/e2e/util" +) + +func getNebulaCluster(runtimeClient client.Client, namespace, name string) *v1alpha1.NebulaCluster { + imagePullPolicy := corev1.PullIfNotPresent + storageClassName := getStorageClassName(runtimeClient) + nebulaVersion := e2econfig.TestConfig.NebulaVersion + return &v1alpha1.NebulaCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: v1alpha1.NebulaClusterSpec{ + Graphd: &v1alpha1.GraphdSpec{ + PodSpec: v1alpha1.PodSpec{ + Replicas: pointer.Int32Ptr(1), + Image: "vesoft/nebula-graphd", + Version: nebulaVersion, + }, + StorageClaim: &v1alpha1.StorageClaim{ + StorageClassName: &storageClassName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Metad: &v1alpha1.MetadSpec{ + PodSpec: v1alpha1.PodSpec{ + Replicas: pointer.Int32Ptr(1), + Image: "vesoft/nebula-metad", + Version: nebulaVersion, + }, + StorageClaim: &v1alpha1.StorageClaim{ + StorageClassName: &storageClassName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Storaged: &v1alpha1.StoragedSpec{ + PodSpec: v1alpha1.PodSpec{ + Replicas: pointer.Int32Ptr(1), + Image: "vesoft/nebula-storaged", + Version: nebulaVersion, + }, + StorageClaim: 
&v1alpha1.StorageClaim{ + StorageClassName: &storageClassName, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1Gi"), + }, + }, + }, + }, + Reference: v1alpha1.WorkloadReference{ + Name: "statefulsets.apps", + Version: "v1", + }, + SchedulerName: corev1.DefaultSchedulerName, + ImagePullPolicy: &imagePullPolicy, + }, + } +} + +func getStorageClassName(runtimeClient client.Client) string { + if e2econfig.TestConfig.StorageClass != "" { + return e2econfig.TestConfig.StorageClass + } + + var scList storagev1.StorageClassList + err := runtimeClient.List(context.TODO(), &scList) + framework.ExpectNoError(err, "failed to list StorageClass") + framework.ExpectNotEqual(len(scList.Items), 0, "no StorageClass found") + var scName string + for i := range scList.Items { + sc := scList.Items[i] + if storageutil.IsDefaultAnnotation(sc.ObjectMeta) { + return sc.GetName() + } + if scName == "" { + scName = sc.GetName() + } + } + return scName +} + +func waitForNebulaClusterReady( + nc *v1alpha1.NebulaCluster, + timeout, pollInterval time.Duration, + runtimeClient client.Client, +) error { + return wait.PollImmediate(pollInterval, timeout, func() (bool, error) { + var err error + var actual v1alpha1.NebulaCluster + key := client.ObjectKey{Namespace: nc.Namespace, Name: nc.Name} + err = runtimeClient.Get(context.TODO(), key, &actual) + if err != nil { + return false, nil + } + + if !actual.MetadComponent().IsReady() { + framework.Logf("Metad is not ready for NebulaCluster %s", key) + return false, nil + } + if actual.Status.Metad.Phase != v1alpha1.RunningPhase { + framework.Logf("Metad is in %s phase, not %s, for NebulaCluster %s", + actual.Status.Metad.Phase, v1alpha1.RunningPhase, key) + return false, nil + } + framework.Logf("Metad is ready and in %s phase for NebulaCluster %s", actual.Status.Metad.Phase, key) + + if !actual.StoragedComponent().IsReady() { + framework.Logf("Storaged is not ready for NebulaCluster %s", key) + return false, nil + } + if actual.Status.Storaged.Phase != v1alpha1.RunningPhase { + framework.Logf("Storaged is in %s phase, not %s, for NebulaCluster %s", + actual.Status.Storaged.Phase, v1alpha1.RunningPhase, key) + return false, nil + } + framework.Logf("Storaged is ready and in %s phase for NebulaCluster %s", actual.Status.Storaged.Phase, key) + + if !actual.GraphdComponent().IsReady() { + framework.Logf("Graphd is not ready for NebulaCluster %s", key) + return false, nil + } + if actual.Status.Graphd.Phase != v1alpha1.RunningPhase { + framework.Logf("Graphd is in %s phase, not %s, for NebulaCluster %s", + actual.Status.Graphd.Phase, v1alpha1.RunningPhase, key) + return false, nil + } + framework.Logf("Graphd is ready and in %s phase for NebulaCluster %s", actual.Status.Graphd.Phase, key) + + return true, nil + }) +} + +func waitForNebulaClusterDeleted( + nc *v1alpha1.NebulaCluster, + timeout, pollInterval time.Duration, + runtimeClient client.Client, +) error { + key := client.ObjectKeyFromObject(nc) + listOptions := client.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set(label.New().Cluster(nc.GetClusterName()))), + Namespace: nc.Namespace, + } + + checkIfDeleted := func(list client.ObjectList) (bool, error) { + err := runtimeClient.List(context.TODO(), list, &listOptions) + if err != nil { + return false, err + } + switch v := list.(type) { + case *appsv1.StatefulSetList: + return len(v.Items) == 0, nil + case *corev1.ServiceList: + return len(v.Items) == 0, nil + case
*corev1.PersistentVolumeClaimList: + return len(v.Items) == 0, nil + } + return false, fmt.Errorf("unknown ObjectList %T", list) + } + + objectLists := []client.ObjectList{ + &appsv1.StatefulSetList{}, + &corev1.ServiceList{}, + &corev1.PersistentVolumeClaimList{}, + } + + return wait.PollImmediate(pollInterval, timeout, func() (bool, error) { + var err error + var actual v1alpha1.NebulaCluster + err = runtimeClient.Get(context.TODO(), key, &actual) + if !apierrors.IsNotFound(err) { + return false, nil + } + + for _, list := range objectLists { + if ok, _ := checkIfDeleted(list); !ok { + return false, nil + } + } + + return true, nil + }) +} + +type nebulaLog struct{} + +func (l nebulaLog) Info(msg string) { framework.Logf(msg) } +func (l nebulaLog) Warn(msg string) { framework.Logf(msg) } +func (l nebulaLog) Error(msg string) { framework.Logf(msg) } +func (l nebulaLog) Fatal(msg string) { framework.Logf(msg) } + +func waitForExecuteNebulaSchema( + timeout, pollInterval time.Duration, + address string, + port int, + username, password, schema string, +) error { + return wait.PollImmediate(pollInterval, timeout, func() (bool, error) { + err := executeNebulaSchema(address, port, username, password, schema) + if err != nil { + return false, nil + } + return true, nil + }) +} + +func executeNebulaSchema(address string, port int, username, password, schema string) error { + hostAddress := nebula.HostAddress{Host: address, Port: port} + hostList := []nebula.HostAddress{hostAddress} + testPoolConfig := nebula.GetDefaultConf() + + pool, err := nebula.NewConnectionPool(hostList, testPoolConfig, nebulaLog{}) + if err != nil { + framework.Logf("failed to initialize the connection pool, host: %s, port: %d, %s", address, port, err.Error()) + return err + } + defer pool.Close() + + session, err := pool.GetSession(username, password) + if err != nil { + framework.Logf("failed to create a new session from connection pool, username: %s, password: %s, %s", username, password, err.Error()) + return err + } + defer session.Release() + + resultSet, err := session.Execute(schema) + if err != nil { + framework.Logf("failed to execute schema %s, %s", schema, err.Error()) + return err + } + if !resultSet.IsSucceed() { + framework.Logf("failed to execute schema %s, ErrorCode: %v, ErrorMsg: %s", schema, resultSet.GetErrorCode(), resultSet.GetErrorMsg()) + return fmt.Errorf("execute schema ErrorCode: %v, ErrorMsg: %s", resultSet.GetErrorCode(), resultSet.GetErrorMsg()) + } + return nil +} + +func portForwardNebulaClusterGraphd( + nc *v1alpha1.NebulaCluster, + localAddress string, + config *rest.Config, +) (port int, stopChan chan<- struct{}, err error) { + localPort, err := e2eutil.GetFreePort() + if err != nil { + return 0, nil, err + } + + stopCh := make(chan struct{}, 1) + readyCh := make(chan struct{}) + errCh := make(chan error) + go func() { + var out io.Writer + err := (&e2eutil.PortForwardOptions{ + Namespace: nc.Namespace, + PodName: nc.GraphdComponent().GetPodName(0), + Config: config, + Address: []string{localAddress}, + Ports: []string{fmt.Sprintf("%d:%d", localPort, nc.GraphdComponent().GetPort(v1alpha1.GraphdPortNameThrift))}, + Out: out, + ErrOut: os.Stderr, + StopChannel: stopCh, + ReadyChannel: readyCh, + }).RunPortForward() + if err != nil { + errCh <- err + } + }() + + select { + case <-readyCh: + return localPort, stopCh, nil + case err := <-errCh: + return 0, nil, err + } +} diff --git a/tests/e2e/suites.go b/tests/e2e/suites.go new file mode 100644 index 00000000..326f7e3b --- /dev/null +++
b/tests/e2e/suites.go @@ -0,0 +1,288 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + "bufio" + "context" + "fmt" + "path" + "strings" + "time" + + "github.com/onsi/ginkgo" + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/kubernetes/test/e2e/framework" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" + e2econfig "github.com/vesoft-inc/nebula-operator/tests/e2e/config" +) + +func setupSuite() { + framework.Logf("running BeforeSuite actions on node 1") + + ginkgo.By("Add NebulaCluster scheme") + err := v1alpha1.AddToScheme(scheme.Scheme) + framework.ExpectNoError(err, "failed to add NebulaCluster scheme") + + ginkgo.By("Setup kubernetes") + setupKubernetes() + + ginkgo.By("Setup cert manager") + setupCertManager() + + ginkgo.By("Setup kruise") + setupKruise() + + ginkgo.By("Setup nebula operator") + setupNebulaOperator() +} + +func setupSuitePerGinkgoNode() { + framework.Logf("running BeforeSuite actions on all nodes") +} + +func cleanupSuite() { + framework.Logf("running AfterSuite actions on all nodes") + framework.RunCleanupActions() + + ginkgo.By("Cleanup kubernetes") + cleanupKubernetes() +} + +func cleanupSuitePerGinkgoNode() { + framework.Logf("running AfterSuite actions on node 1") +} + +func setupKubernetes() { + if !e2econfig.TestConfig.InstallKubernetes { + return + } + framework.Logf("install kubernetes") + + kindName := e2econfig.TestConfig.KindName + if kindName == "" { + kindName = e2econfig.DefaultKindName + } + stdout, _, err := framework.RunCmd("kind", "get", "clusters") + framework.ExpectNoError(err, "failed to get kind clusters") + isInstall := false + sc := bufio.NewScanner(strings.NewReader(stdout)) + for sc.Scan() { + if sc.Text() == kindName { + isInstall = true + } + } + if err := sc.Err(); err != nil { + framework.ExpectNoError(err) + } + + if !isInstall { + cmd := []string{"kind", "create", "cluster", "--name", kindName} + if framework.TestContext.KubeConfig != "" { + cmd = append(cmd, "--kubeconfig", framework.TestContext.KubeConfig) + } + if e2econfig.TestConfig.KindConfig != "" { + cmd = append(cmd, "--config", e2econfig.TestConfig.KindConfig) + } + _, _, err := framework.RunCmd(cmd[0], cmd[1:]...) 
+ framework.ExpectNoError(err, "failed to install kubernetes %s", kindName) + } + + clientConfig, err := framework.LoadConfig() + framework.ExpectNoError(err, "failed load config") + runtimeClient, err := client.New(clientConfig, client.Options{}) + framework.ExpectNoError(err) + + err = waitForKubernetesWorkloadsReady("", 30*time.Minute, 10*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for kubernetes workloads ready") +} + +func setupCertManager() { + if !e2econfig.TestConfig.InstallCertManager { + return + } + + helmName := "cert-manager" // nolint: goconst + helmNamespace := "cert-manager" + workloadNamespaces := []string{helmNamespace} + helmArgs := []string{ + "install", helmName, + "cert-manager", + "--repo", "https://charts.jetstack.io", + "--namespace", helmNamespace, + "--create-namespace", + "--version", e2econfig.TestConfig.InstallCertManagerVersion, + "--set", "installCRDs=true", + } + helmInstall(helmName, helmNamespace, workloadNamespaces, helmArgs...) +} + +func setupKruise() { + if !e2econfig.TestConfig.InstallKruise { + return + } + helmName := "kruise" + helmNamespace := "default" + workloadNamespaces := []string{"kruise-system"} + helmArgs := []string{ + "install", helmName, + fmt.Sprintf("https://github.com/openkruise/kruise/releases/download/%s/kruise-chart.tgz", e2econfig.TestConfig.InstallKruiseVersion), + } + helmInstall(helmName, helmNamespace, workloadNamespaces, helmArgs...) +} + +func setupNebulaOperator() { + if !e2econfig.TestConfig.InstallNebulaOperator { + return + } + helmName := "nebula-operator" + helmNamespace := "nebula-operator-system" + workloadNamespaces := []string{helmNamespace} + helmArgs := []string{ + "install", helmName, + path.Join(framework.TestContext.RepoRoot, "charts/nebula-operator"), + "--namespace", helmNamespace, + "--create-namespace", + } + helmInstall(helmName, helmNamespace, workloadNamespaces, helmArgs...) +} + +func cleanupKubernetes() { + if !e2econfig.TestConfig.InstallKubernetes || !e2econfig.TestConfig.UninstallKubernetes { + return + } + framework.Logf("uninstall kubernetes") + + kindName := e2econfig.TestConfig.KindName + if kindName == "" { + kindName = e2econfig.DefaultKindName + } + _, _, err := framework.RunCmd("kind", "delete", "cluster", "--name", kindName) + framework.ExpectNoError(err, "failed to uninstall kubernetes") +} + +func isHelmInstalled(name, namespace string) bool { + args := []string{"status", name, "--namespace", namespace} + + if framework.TestContext.KubeConfig != "" { + args = append(args, "--kubeconfig", framework.TestContext.KubeConfig) + } + + _, _, err := framework.RunCmd("helm", args...) + if err != nil { + if !strings.Contains(err.Error(), "release: not found") { + framework.ExpectNoError(err, "failed to helm status %s%s", namespace, name) + } + return false + } + return true +} + +func helmInstall(name, namespace string, workloadNamespaces []string, args ...string) { + framework.Logf("install %s", name) + if !isHelmInstalled(name, namespace) { + if framework.TestContext.KubeConfig != "" { + args = append(args, "--kubeconfig", framework.TestContext.KubeConfig) + } + _, _, err := framework.RunCmd("helm", args...) 
+ framework.ExpectNoError(err, "failed to install helm %s/%s", namespace, name) + } else { + framework.Logf("helm %s/%s already installed", namespace, name) + } + + clientConfig, err := framework.LoadConfig() + framework.ExpectNoError(err, "failed to load config") + runtimeClient, err := client.New(clientConfig, client.Options{}) + framework.ExpectNoError(err) + + for _, ns := range workloadNamespaces { + err = waitForKubernetesWorkloadsReady(ns, 20*time.Minute, 5*time.Second, runtimeClient) + framework.ExpectNoError(err, "failed to wait for %s/%s ready", ns, name) + } +} + +func waitForKubernetesWorkloadsReady(namespace string, timeout, pollInterval time.Duration, runtimeClient client.Client) error { + start := time.Now() + listOptions := client.ListOptions{} + nsStr := "all namespaces" + if namespace != "" { + listOptions.Namespace = namespace + nsStr = fmt.Sprintf("namespace %q", namespace) + } + + getNotReadyKeys := func(list client.ObjectList) ([]string, string, error) { + err := runtimeClient.List(context.TODO(), list, &listOptions) + if err != nil { + return nil, "", err + } + var notReady []string + switch v := list.(type) { + case *appsv1.DeploymentList: + for i := range v.Items { + item := v.Items[i] + if item.Status.Replicas != item.Status.ReadyReplicas { + notReady = append(notReady, client.ObjectKeyFromObject(&item).String()) + } + } + return notReady, "Deployment", nil + case *appsv1.StatefulSetList: + for i := range v.Items { + item := v.Items[i] + if item.Status.Replicas != item.Status.ReadyReplicas { + notReady = append(notReady, client.ObjectKeyFromObject(&item).String()) + } + } + return notReady, "StatefulSet", nil + case *appsv1.DaemonSetList: + for i := range v.Items { + item := v.Items[i] + if item.Status.DesiredNumberScheduled != item.Status.NumberReady { + notReady = append(notReady, client.ObjectKeyFromObject(&item).String()) + } + } + return notReady, "DaemonSet", nil + } + return nil, "", fmt.Errorf("unknown ObjectList %T", list) + } + + objectLists := []client.ObjectList{ + &appsv1.DeploymentList{}, + &appsv1.StatefulSetList{}, + &appsv1.DaemonSetList{}, + } + + return wait.PollImmediate(pollInterval, timeout, func() (bool, error) { + success := true + framework.Logf("waiting up to %v for workloads in %s to be ready", timeout, nsStr) + for _, list := range objectLists { + notReady, kind, err := getNotReadyKeys(list) + if err != nil { + framework.Logf("failed to get %q in %s: %v", kind, nsStr, err) + success = false + } + if len(notReady) > 0 { + framework.Logf("%q not ready in %s: %v (%d seconds elapsed)", kind, nsStr, notReady, int(time.Since(start).Seconds())) + success = false + } + } + return success, nil + }) +} diff --git a/tests/e2e/util/portforward.go b/tests/e2e/util/portforward.go new file mode 100644 index 00000000..388f9139 --- /dev/null +++ b/tests/e2e/util/portforward.go @@ -0,0 +1,87 @@ +/* +Copyright 2021 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package util + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" +) + +type PortForwardOptions struct { + Namespace string + PodName string + Config *rest.Config + Address []string + Ports []string + Out io.Writer + ErrOut io.Writer + StopChannel chan struct{} + ReadyChannel chan struct{} +} + +func (o *PortForwardOptions) RunPortForward() error { + clientSet, err := clientset.NewForConfig(o.Config) + if err != nil { + return err + } + pod, err := clientSet.CoreV1().Pods(o.Namespace).Get(context.TODO(), o.PodName, metav1.GetOptions{}) + if err != nil { + return err + } + + if pod.Status.Phase != corev1.PodRunning { + return fmt.Errorf("unable to forward port because pod is not running. Current status=%v", pod.Status.Phase) + } + + transport, upgrader, err := spdy.RoundTripperFor(o.Config) + if err != nil { + return err + } + req := clientSet.CoreV1().RESTClient().Post().Resource("pods").Namespace(o.Namespace).Name(pod.Name).SubResource("portforward") + dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, req.URL()) + fw, err := portforward.NewOnAddresses(dialer, o.Address, o.Ports, o.StopChannel, o.ReadyChannel, o.Out, o.ErrOut) + if err != nil { + return err + } + return fw.ForwardPorts() +} + +func GetFreePort() (int, error) { + addr, err := net.ResolveTCPAddr("tcp", "localhost:0") + if err != nil { + return 0, err + } + + l, err := net.ListenTCP("tcp", addr) + if err != nil { + return 0, err + } + defer func() { + _ = l.Close() + }() + return l.Addr().(*net.TCPAddr).Port, nil +}