diff --git a/.dockerignore b/.dockerignore index f5297e56..4d8b28e2 100644 --- a/.dockerignore +++ b/.dockerignore @@ -4,11 +4,14 @@ Dockerfile .github/ .gitlab-ci.yml .idea/ +.vscode/ CHANGES.md LICENSE README.md e2etests/ deploy/ +hack/ mock/ script/ **/*_test.go +skaffold.yaml diff --git a/.github/workflows/test_e2e.yml b/.github/workflows/test_e2e.yml index 57a12d8e..460773e0 100644 --- a/.github/workflows/test_e2e.yml +++ b/.github/workflows/test_e2e.yml @@ -1,12 +1,23 @@ name: Run e2e tests -on: [push] +on: + pull_request: + branches: [master] + push: + branches: [master] + jobs: test: runs-on: ubuntu-latest strategy: matrix: - k8s: [ k8s-1.19.10, k8s-1.20.6, k8s-1.21.0, k8s-1.22.0 ] - name: k8s ${{ matrix.k8s }} + k8s: + - test_version: v1.19.14 + major: 19 + - test_version: v1.20.10 + major: 20 + - test_version: v1.21.4 + major: 21 + name: k8s v1.${{ matrix.k8s.major }} steps: - uses: actions/setup-go@v2 with: @@ -31,22 +42,59 @@ jobs: env: HCLOUD_TOKEN: ${{ secrets.HCLOUD_TOKEN }} run: | + # If HCLOUD_TOKEN has multiple items (delimited with a comma) then grab a random one. + HCLOUD_TOKEN=$(echo $HCLOUD_TOKEN | tr ',' '\n' | shuf -n1) echo "HCLOUD_TOKEN=$HCLOUD_TOKEN" >> $GITHUB_ENV + echo "::add-mask::$HCLOUD_TOKEN" - name: Blow up if no HCLOUD_TOKEN or TTS_TOKEN set. if: steps.check_tts.outputs.hcloud_token_set == 'false' && steps.check_tts.outputs.tts_token_set == 'false' run: | echo "::error ::Couldn't determine HCLOUD_TOKEN. Check your repository secrets are setup correctly." - - uses: actions/cache@v2 + - name: cache deps + uses: actions/cache@v2 with: path: | - ~/go/pkg/mod - ~/.cache/go-build - key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-go- - - name: Run tests + ~/bin/k3sup + ~/bin/skaffold + # Use this workflow file as cache key in case we change the versions we download below. + key: bin-deps-${{ hashFiles('.github/workflows/test_e2e.yml') }} + - name: install deps + run: | + mkdir -p ~/bin + [[ ! 
-f ~/bin/skaffold ]] && curl -Lo ~/bin/skaffold https://storage.googleapis.com/skaffold/releases/latest/skaffold-linux-amd64 && chmod +x ~/bin/skaffold + [[ ! -f ~/bin/k3sup ]] && curl -Lo ~/bin/k3sup https://github.com/alexellis/k3sup/releases/download/0.11.0/k3sup && chmod +x ~/bin/k3sup + echo "$HOME/bin" >> $GITHUB_PATH + - name: Log in to the Container registry + uses: docker/login-action@f054a8b539a109f9f41c372932f1ae047eff08c9 + if: ${{ github.repository_owner != 'hetznercloud' }} + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - uses: 3bit/setup-hcloud@v1 + - if: ${{ github.repository_owner != 'hetznercloud' }} + env: + REPO_OWNER: ${{ github.repository_owner }} + run: | + echo "SKAFFOLD_DEFAULT_REPO=ghcr.io/$REPO_OWNER" >> $GITHUB_ENV + - name: run e2e tests env: - K8S_VERSION: ${{ matrix.k8s }} + K3S_CHANNEL: v1.${{ matrix.k8s.major }} + SCOPE: ci-${{ matrix.k8s.major }}-${{ github.run_id }} + SSH_KEYS: ${{ secrets.SSH_KEYS }} + KUBE_VERSION: ${{ matrix.k8s.test_version }} run: | - go test $(go list ./... | grep e2etests) -v -timeout 60m - ./script/delete-token.sh $HCLOUD_TOKEN + set -uex -o pipefail + eval $(INSTANCES=1 hack/dev-up.sh) + skaffold run + hack/run-e2e-tests.sh + - name: cleanup environment + if: always() + env: + SCOPE: ci-${{ matrix.k8s.major }}-${{ github.run_id }} + run: hack/dev-down.sh + - name: Cleanup TTS token + if: always() && steps.check_tts.outputs.tts_token_set == 'true' + env: + TTS_TOKEN: ${{ secrets.TTS_TOKEN }} + run: ./script/delete-token.sh $HCLOUD_TOKEN diff --git a/Dockerfile b/Dockerfile index 813c313f..dde64753 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,9 +3,14 @@ WORKDIR /csi ADD go.mod go.sum /csi/ RUN go mod download ADD . 
/csi/ -RUN CGO_ENABLED=0 go build -o driver.bin github.com/hetznercloud/csi-driver/cmd/driver +RUN ls -al +# `skaffold debug` sets SKAFFOLD_GO_GCFLAGS to disable compiler optimizations +ARG SKAFFOLD_GO_GCFLAGS + +RUN CGO_ENABLED=0 go build -gcflags="${SKAFFOLD_GO_GCFLAGS}" -o driver.bin github.com/hetznercloud/csi-driver/cmd/driver FROM alpine:3.13 RUN apk add --no-cache ca-certificates e2fsprogs xfsprogs blkid xfsprogs-extra e2fsprogs-extra btrfs-progs +ENV GOTRACEBACK=all COPY --from=builder /csi/driver.bin /bin/hcloud-csi-driver ENTRYPOINT ["/bin/hcloud-csi-driver"] diff --git a/README.md b/README.md index 2868ce90..fd8c400d 100644 --- a/README.md +++ b/README.md @@ -104,26 +104,56 @@ tests for a specific version. You can run the tests with the following commands. Keep in mind, that these tests run on real cloud servers and will create volumes that will be billed. -**Test Server Setup**: +**Development/Testing** -1x CPX21 (Ubuntu 18.04) +For local development, you will need the following tools installed: -**Requirements: Docker and Go 1.16** + * Docker + * Golang 1.16 + * [Skaffold](https://skaffold.dev/) + * [k3sup](https://github.com/alexellis/k3sup#readme) + * [hcloud CLI](https://github.com/hetznercloud/cli#readme) -1. Configure your environment correctly - ```bash - export HCLOUD_TOKEN= - export K8S_VERSION=1.21.0 # The specific (latest) version is needed here - export USE_SSH_KEYS=key1,key2 # Name or IDs of your SSH Keys within the Hetzner Cloud, the servers will be accessable with that keys - ``` -2. Run the tests - ```bash - go test $(go list ./... | grep e2etests) -v -timeout 60m - ``` +You will also need to set a `HCLOUD_TOKEN` in your shell session: + +```sh + $ export HCLOUD_TOKEN= +``` + +You can quickly bring up a dev cluster test environment in Hetzner Cloud. 
+ +```sh + $ eval $(INSTANCES=3 hack/dev-up.sh) + # In about a minute, you should have a 3 node cluster of CPX11 instances (cost is around 2 cents per hour) + $ kubectl get nodes + # Now let's run the app. + $ SKAFFOLD_DEFAULT_REPO=my_dockerhub_username skaffold dev + # In a minute or two, the project should be built into an image, deployed into the test cluster. + # Logs will now be tailing out to your shell. + # If you make changes to the project, the image will be rebuilt and pushed to the cluster, restarting pods as needed. + # You can even debug the containers running remotely in The Cloud(tm) using standard Golang delve. + ^C + $ skaffold debug + # The logs will indicate which debug ports are available. + # IMPORTANT! The created servers are not automatically cleaned up. You must remember to delete everything yourself: + $ hack/dev-down.sh +``` -The tests will now run, this will take a while (~30 min). +### A note about `SKAFFOLD_DEFAULT_REPO` -**If the tests fail, make sure to clean up the project with the Hetzner Cloud Console or the hcloud cli.** +When you use Skaffold to deploy the driver to a remote cluster in Hetzner Cloud, you need somewhere to host the images. The default image repository is owned by Hetzner, and thus cannot be used for local development purposes. Instead, you can point Skaffold at your own Docker Hub, ghcr.io, Quay.io, or whatever. The Skaffold docks talk more about [Image Repository Handling](https://skaffold.dev/docs/environment/image-registries/) in gory detail, if you need more information. + +Please see the [Skaffold Documentation](https://skaffold.dev/docs/) for more information on the things you can do with Skaffold. + +### Running end-to-end tests + +Note, these tests will create and detach a *lot* of volumes. You will likely run into API request limits if you run this too frequently. +The tests take 10-20 minutes. 
+ + +```sh +hack/run-e2e-tests.sh +``` ## License diff --git a/deploy/kustomization.yaml b/deploy/kustomization.yaml new file mode 100644 index 00000000..90bbb9d9 --- /dev/null +++ b/deploy/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - kubernetes/hcloud-csi.yml diff --git a/e2etests/.gitignore b/e2etests/.gitignore deleted file mode 100644 index 8dfc4ceb..00000000 --- a/e2etests/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -ci-hcloud-csi-driver.tar -ssh_key -kubeconfig -join.txt diff --git a/e2etests/e2e_test.go b/e2etests/e2e_test.go deleted file mode 100644 index 8929f597..00000000 --- a/e2etests/e2e_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package e2etests - -import ( - "fmt" - "os" - "testing" -) - -var testCluster TestCluster - -func TestMain(m *testing.M) { - if err := testCluster.Start(); err != nil { - fmt.Printf("%v\n", err) - os.Exit(1) - } - - rc := m.Run() - - if err := testCluster.Stop(rc > 0); err != nil { - fmt.Printf("%v\n", err) - os.Exit(1) - } - os.Exit(rc) -} - -func TestOfficialTestsuite(t *testing.T) { - t.Run("parallel tests", func(t *testing.T) { - err := RunCommandVisibleOnServer(testCluster.setup.privKey, testCluster.setup.MainNode, fmt.Sprintf("KUBECONFIG=/root/.kube/config ./ginkgo -p -v -focus='External.Storage' -skip='\\[Feature:|\\[Disruptive\\]|\\[Serial\\]' ./e2e.test -- -storage.testdriver=test-driver.yml")) - if err != nil { - t.Error(err) - } - }) - t.Run("serial tests", func(t *testing.T) { - err := RunCommandVisibleOnServer(testCluster.setup.privKey, testCluster.setup.MainNode, fmt.Sprintf("KUBECONFIG=/root/.kube/config ./ginkgo -v -focus='External.Storage.*(\\[Feature:|\\[Serial\\])' ./e2e.test -- -storage.testdriver=test-driver.yml")) - if err != nil { - t.Error(err) - } - }) -} diff --git a/e2etests/setup.go b/e2etests/setup.go deleted file mode 100644 index b8f2379c..00000000 --- a/e2etests/setup.go +++ /dev/null @@ -1,550 +0,0 @@ -package e2etests - -import 
( - "bytes" - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "html/template" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "strings" - "sync" - "time" - - "github.com/hetznercloud/hcloud-go/hcloud" - "golang.org/x/crypto/ssh" -) - -type K8sDistribution string - -const ( - K8sDistributionK8s K8sDistribution = "k8s" - K8sDistributionK3s K8sDistribution = "k3s" -) - -var instanceType = "cpx21" - -type hcloudK8sSetup struct { - Hcloud *hcloud.Client - HcloudToken string - K8sVersion string - K8sDistribution K8sDistribution - TestIdentifier string - ImageName string - KeepOnFailure bool - MainNode *hcloud.Server - WorkerNodes []*hcloud.Server - privKey string - sshKey *hcloud.SSHKey - clusterJoinCMD string - testLabels map[string]string -} - -type cloudInitTmpl struct { - K8sVersion string - HcloudToken string - IsClusterServer bool - JoinCMD string -} - -// PrepareTestEnv setups a test environment for the CSI Driver -// This includes the creation of a SSH Key, a "Cluster Node" and a defined amount of Worker nodes -// The servers will be created with a Cloud Init UserData -// The template can be found under e2etests/templates/cloudinit_.txt.tpl -func (s *hcloudK8sSetup) PrepareTestEnv(ctx context.Context, additionalSSHKeys []*hcloud.SSHKey) error { - const op = "hcloudK8sSetup/PrepareTestEnv" - - s.testLabels = map[string]string{"K8sDistribution": string(s.K8sDistribution), "K8sVersion": strings.ReplaceAll(s.K8sVersion, "+", ""), "test": s.TestIdentifier} - err := s.getSSHKey(ctx) - if err != nil { - return fmt.Errorf("%s getSSHKey: %s", op, err) - } - - srv, err := s.createClusterServer(ctx, "cluster-node", instanceType, additionalSSHKeys) - if err != nil { - return fmt.Errorf("%s: create cluster node: %v", op, err) - } - s.MainNode = srv - - s.waitUntilSSHable(s.MainNode) - - err = s.waitForCloudInit(s.MainNode) - if err != nil { - return err - } - - joinCmd, err := s.getJoinCmd() - if err != nil { - return err - } - 
s.clusterJoinCMD = joinCmd - - err = s.transferDockerImage(s.MainNode) - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - - fmt.Printf("[cluster-node] %s Load Image:\n", op) - transferCmd := "ctr -n=k8s.io image import ci-hcloud-csi-driver.tar" - err = RunCommandOnServer(s.privKey, s.MainNode, transferCmd) - if err != nil { - return fmt.Errorf("%s: Load image %s", op, err) - } - - var workers = 1 // Change this value if you want to have more workers for the test - var wg sync.WaitGroup - for worker := 1; worker <= workers; worker++ { - wg.Add(1) - go s.createClusterWorker(ctx, additionalSSHKeys, &wg, worker) - } - wg.Wait() - return nil -} - -func (s *hcloudK8sSetup) createClusterWorker(ctx context.Context, additionalSSHKeys []*hcloud.SSHKey, wg *sync.WaitGroup, worker int) { - const op = "hcloudK8sSetup/createClusterWorker" - defer wg.Done() - - workerName := fmt.Sprintf("cluster-worker-%d", worker) - fmt.Printf("[%s] %s Create worker node:\n", workerName, op) - - userData, err := s.getCloudInitConfig(false) - if err != nil { - fmt.Printf("[%s] %s getCloudInitConfig: %s", workerName, op, err) - return - } - srv, err := s.createServer(ctx, workerName, instanceType, additionalSSHKeys, err, userData) - if err != nil { - fmt.Printf("[%s] %s createServer: %s", workerName, op, err) - return - } - s.WorkerNodes = append(s.WorkerNodes, srv) - - s.waitUntilSSHable(srv) - - err = s.waitForCloudInit(srv) - if err != nil { - fmt.Printf("[%s] %s: wait for cloud init on worker: %v", srv.Name, op, err) - return - } - - err = s.transferDockerImage(srv) - if err != nil { - fmt.Printf("[%s] %s: transfer image on worker: %v", srv.Name, op, err) - return - } - - fmt.Printf("[%s] %s Load Image\n", srv.Name, op) - - transferCmd := "ctr -n=k8s.io image import ci-hcloud-csi-driver.tar" - - err = RunCommandOnServer(s.privKey, srv, transferCmd) - if err != nil { - fmt.Printf("[%s] %s: load image on worker: %v", srv.Name, op, err) - return - } -} - -func (s *hcloudK8sSetup) 
waitUntilSSHable(server *hcloud.Server) { - const op = "hcloudK8sSetup/PrepareTestEnv" - fmt.Printf("[%s] %s: Waiting for server to be sshable:\n", server.Name, op) - for { - conn, err := net.Dial("tcp", fmt.Sprintf("%s:22", server.PublicNet.IPv4.IP.String())) - if err != nil { - time.Sleep(1 * time.Second) - continue - } - _ = conn.Close() - fmt.Printf("[%s] %s: SSH Connection successful\n", server.Name, op) - break - } -} - -func (s *hcloudK8sSetup) createClusterServer(ctx context.Context, name, typ string, additionalSSHKeys []*hcloud.SSHKey) (*hcloud.Server, error) { - const op = "e2etest/createClusterServer" - - userData, err := s.getCloudInitConfig(true) - if err != nil { - return nil, fmt.Errorf("%s getCloudInitConfig: %s", op, err) - } - srv, err := s.createServer(ctx, name, typ, additionalSSHKeys, err, userData) - if err != nil { - return nil, fmt.Errorf("%s createServer: %s", op, err) - } - return srv, nil -} - -func (s *hcloudK8sSetup) createServer(ctx context.Context, name string, typ string, additionalSSHKeys []*hcloud.SSHKey, err error, userData string) (*hcloud.Server, error) { - const op = "e2etest/createServer" - sshKeys := []*hcloud.SSHKey{s.sshKey} - for _, additionalSSHKey := range additionalSSHKeys { - sshKeys = append(sshKeys, additionalSSHKey) - } - - res, _, err := s.Hcloud.Server.Create(ctx, hcloud.ServerCreateOpts{ - Name: fmt.Sprintf("%s-%s", name, s.TestIdentifier), - ServerType: &hcloud.ServerType{Name: typ}, - Image: &hcloud.Image{Name: "ubuntu-20.04"}, - SSHKeys: sshKeys, - UserData: userData, - Labels: s.testLabels, - }) - if err != nil { - return nil, fmt.Errorf("%s Hcloud.Server.Create: %s", op, err) - } - - _, errCh := s.Hcloud.Action.WatchProgress(ctx, res.Action) - if err := <-errCh; err != nil { - return nil, fmt.Errorf("%s WatchProgress Action %s: %s", op, res.Action.Command, err) - } - - for _, nextAction := range res.NextActions { - _, errCh := s.Hcloud.Action.WatchProgress(ctx, nextAction) - if err := <-errCh; err != nil { - 
return nil, fmt.Errorf("%s WatchProgress NextAction %s: %s", op, nextAction.Command, err) - } - } - srv, _, err := s.Hcloud.Server.GetByID(ctx, res.Server.ID) - if err != nil { - return nil, fmt.Errorf("%s Hcloud.Server.GetByID: %s", op, err) - } - return srv, nil -} - -// PrepareK8s patches an existing kubernetes cluster with the correct -// CSI Driver version from this test run. -// This should only run on the cluster main node -func (s *hcloudK8sSetup) PrepareK8s() (string, error) { - const op = "hcloudK8sSetup/PrepareK8s" - - err := s.prepareCSIDriverDeploymentFile() - if err != nil { - return "", fmt.Errorf("%s: %s", op, err) - } - - fmt.Printf("[%s] %s: Apply csi-driver deployment\n", s.MainNode.Name, op) - err = RunCommandOnServer(s.privKey, s.MainNode, "KUBECONFIG=/root/.kube/config kubectl apply -f csi-driver.yml") - if err != nil { - return "", fmt.Errorf("%s Deploy csi: %s", op, err) - } - - patch := `{"spec":{"template":{"spec":{"containers":[{"name":"hcloud-csi-driver","env":[{"name":"LOG_LEVEL","value":"debug"}]}]}}}}` - fmt.Printf("[%s] %s: Patch deployment for debug logging\n", s.MainNode.Name, op) - err = RunCommandOnServer(s.privKey, s.MainNode, fmt.Sprintf("KUBECONFIG=/root/.kube/config kubectl patch statefulset hcloud-csi-controller -n kube-system --patch '%s'", patch)) - if err != nil { - return "", fmt.Errorf("%s Patch StatefulSet: %s", op, err) - } - err = RunCommandOnServer(s.privKey, s.MainNode, fmt.Sprintf("KUBECONFIG=/root/.kube/config kubectl patch daemonset hcloud-csi-node -n kube-system --patch '%s'", patch)) - if err != nil { - return "", fmt.Errorf("%s Patch DaemonSet: %s", op, err) - } - - fmt.Printf("[%s] %s: Ensure Server is not labeled as master\n", s.MainNode.Name, op) - err = RunCommandOnServer(s.privKey, s.MainNode, "KUBECONFIG=/root/.kube/config kubectl label nodes --all node-role.kubernetes.io/master-") - if err != nil { - return "", fmt.Errorf("%s Ensure Server is not labeled as master: %s", op, err) - } - - 
fmt.Printf("[%s] %s: Read test-driver.yml configuration file\n", s.MainNode.Name, op) - testDriverFile, err := ioutil.ReadFile("templates/testdrivers/1.18.yml") - if err != nil { - return "", fmt.Errorf("%s read testdriverfile file: %s %v", op, "templates/testdrivers/1.18.yml", err) - } - - fmt.Printf("[%s] %s: Transfer test-driver.yml configuration file\n", s.MainNode.Name, op) - err = RunCommandOnServer(s.privKey, s.MainNode, fmt.Sprintf("echo '%s' >> test-driver.yml", testDriverFile)) - if err != nil { - return "", fmt.Errorf("%s send testdriverfile file: %s %v", op, "templates/testdrivers/1.18.yml", err) - } - fmt.Printf("[%s] %s: Download kubeconfig\n", s.MainNode.Name, op) - err = scp("ssh_key", fmt.Sprintf("root@%s:/root/.kube/config", s.MainNode.PublicNet.IPv4.IP.String()), "kubeconfig") - if err != nil { - return "", fmt.Errorf("%s download kubeconfig: %s", op, err) - } - - fmt.Printf("[%s] %s: Ensure correct server is set\n", s.MainNode.Name, op) - kubeconfigBefore, err := ioutil.ReadFile("kubeconfig") - if err != nil { - return "", fmt.Errorf("%s reading kubeconfig: %s", op, err) - } - kubeconfigAfterwards := strings.Replace(string(kubeconfigBefore), "127.0.0.1", s.MainNode.PublicNet.IPv4.IP.String(), -1) - err = ioutil.WriteFile("kubeconfig", []byte(kubeconfigAfterwards), 0) - if err != nil { - return "", fmt.Errorf("%s writing kubeconfig: %s", op, err) - } - return "kubeconfig", nil -} - -func scp(identityFile, src, dest string) error { - const op = "e2etests/scp" - - err := runCmd( - "/usr/bin/scp", - []string{ - "-F", "/dev/null", // ignore $HOME/.ssh/config - "-i", identityFile, - "-o", "IdentitiesOnly=yes", // only use the identities passed on the command line - "-o", "UserKnownHostsFile=/dev/null", - "-o", "StrictHostKeyChecking=no", - src, - dest, - }, - nil, - ) - if err != nil { - return fmt.Errorf("%s: %v", op, err) - } - return nil -} - -func runCmd(name string, argv []string, env []string) error { - cmd := exec.Command(name, argv...) 
- if os.Getenv("TEST_DEBUG_MODE") != "" { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - } - if env != nil { - cmd.Env = append(os.Environ(), env...) - } - if err := cmd.Run(); err != nil { - return fmt.Errorf("run cmd: %s %s: %v", name, strings.Join(argv, " "), err) - } - return nil -} - -// prepareCSIDriverDeploymentFile patches the Cloud Controller Deployment file -// It replaces the used image and the pull policy to always use the local image -// from this test run -func (s *hcloudK8sSetup) prepareCSIDriverDeploymentFile() error { - const op = "hcloudK8sSetup/prepareCSIDriverDeploymentFile" - fmt.Printf("[%s] %s: Read master deployment file\n", s.MainNode.Name, op) - deploymentFile, err := ioutil.ReadFile("../deploy/kubernetes/hcloud-csi-master.yml") - if err != nil { - return fmt.Errorf("%s: read csi driver deployment file %s: %v", op, "../deploy/kubernetes/hcloud-csi-master.yml", err) - } - - fmt.Printf("[%s] %s: Prepare deployment file and transfer it\n", s.MainNode.Name, op) - deploymentFile = []byte(strings.ReplaceAll(string(deploymentFile), "hetznercloud/hcloud-csi-driver:latest", fmt.Sprintf("hcloud-csi:ci_%s", s.TestIdentifier))) - deploymentFile = []byte(strings.ReplaceAll(string(deploymentFile), " imagePullPolicy: Always", " imagePullPolicy: IfNotPresent")) - - err = RunCommandOnServer(s.privKey, s.MainNode, fmt.Sprintf("echo '%s' >> csi-driver.yml", deploymentFile)) - if err != nil { - return fmt.Errorf("%s: Prepare deployment file and transfer it: %s", op, err) - } - return nil -} - -// transferDockerImage transfers the local build docker image tar via SCP -func (s *hcloudK8sSetup) transferDockerImage(server *hcloud.Server) error { - const op = "hcloudK8sSetup/transferDockerImage" - fmt.Printf("[%s] %s: Transfer docker image\n", server.Name, op) - err := WithSSHSession(s.privKey, server.PublicNet.IPv4.IP.String(), func(session *ssh.Session) error { - file, err := os.Open("ci-hcloud-csi-driver.tar") - if err != nil { - return fmt.Errorf("%s read 
ci-hcloud-ccm.tar: %s", op, err) - } - defer file.Close() - stat, err := file.Stat() - if err != nil { - return fmt.Errorf("%s file.Stat: %s", op, err) - } - wg := sync.WaitGroup{} - wg.Add(1) - - go func() { - hostIn, _ := session.StdinPipe() - defer hostIn.Close() - fmt.Fprintf(hostIn, "C0664 %d %s\n", stat.Size(), "ci-hcloud-csi-driver.tar") - io.Copy(hostIn, file) - fmt.Fprint(hostIn, "\x00") - wg.Done() - }() - - err = session.Run("/usr/bin/scp -t /root") - if err != nil { - return fmt.Errorf("%s copy via scp: %s", op, err) - } - wg.Wait() - return err - }) - return err -} - -// waitForCloudInit waits on cloud init on the server. -// when cloud init is ready we can assume that the server -// and the plain k8s installation is ready -func (s *hcloudK8sSetup) waitForCloudInit(server *hcloud.Server) error { - const op = "hcloudK8sSetup/PrepareTestEnv" - fmt.Printf("[%s] %s: Wait for cloud-init\n", server.Name, op) - err := RunCommandOnServer(s.privKey, server, fmt.Sprintf("cloud-init status --wait > /dev/null")) - if err != nil { - return fmt.Errorf("[%s] %s: Wait for cloud-init: %s", server.Name, op, err) - } - return nil -} - -// waitForCloudInit waits on cloud init on the server. 
-// when cloud init is ready we can assume that the server -// and the plain k8s installation is ready -func (s *hcloudK8sSetup) getJoinCmd() (string, error) { - const op = "hcloudK8sSetup/getJoinCmd" - fmt.Printf("[%s] %s: Download join cmd\n", s.MainNode.Name, op) - err := scp("ssh_key", fmt.Sprintf("root@%s:/root/join.txt", s.MainNode.PublicNet.IPv4.IP.String()), "join.txt") - if err != nil { - return "", fmt.Errorf("[%s] %s download join cmd: %s", s.MainNode.Name, op, err) - } - cmd, err := ioutil.ReadFile("join.txt") - if err != nil { - return "", fmt.Errorf("[%s] %s reading join cmd file: %s", s.MainNode.Name, op, err) - } - return string(cmd), nil -} - -// TearDown deletes all created resources within the Hetzner Cloud -// there is no need to "shutdown" the k8s cluster before -// so we just delete all created resources -func (s *hcloudK8sSetup) TearDown(testFailed bool) error { - const op = "hcloudK8sSetup/TearDown" - - if s.KeepOnFailure && testFailed { - fmt.Println("Skipping tear-down for further analysis.") - fmt.Println("Please clean-up afterwards ;-)") - return nil - } - - ctx := context.Background() - for _, wn := range s.WorkerNodes { - _, err := s.Hcloud.Server.Delete(ctx, wn) - if err != nil { - return fmt.Errorf("[%s] %s Hcloud.Server.Delete: %s", wn.Name, op, err) - } - } - _, err := s.Hcloud.Server.Delete(ctx, s.MainNode) - if err != nil { - return fmt.Errorf("[cluster-node] %s Hcloud.Server.Delete: %s", op, err) - } - s.MainNode = nil - - _, err = s.Hcloud.SSHKey.Delete(ctx, s.sshKey) - if err != nil { - return fmt.Errorf("%s Hcloud.SSHKey.Delete: %s", err, err) - } - s.sshKey = nil - return nil -} - -// getCloudInitConfig returns the generated cloud init configuration -func (s *hcloudK8sSetup) getCloudInitConfig(isClusterServer bool) (string, error) { - const op = "hcloudK8sSetup/getCloudInitConfig" - - str, err := ioutil.ReadFile(fmt.Sprintf("templates/cloudinit_%s.txt.tpl", s.K8sDistribution)) - if err != nil { - return "", fmt.Errorf("%s: 
read template file %s: %v", "templates/cloudinit.txt.tpl", op, err) - } - tmpl, err := template.New("cloud_init").Parse(string(str)) - if err != nil { - return "", fmt.Errorf("%s: parsing template file %s: %v", "templates/cloudinit.txt.tpl", op, err) - } - var buf bytes.Buffer - if err := tmpl.Execute(&buf, cloudInitTmpl{K8sVersion: s.K8sVersion, HcloudToken: s.HcloudToken, IsClusterServer: isClusterServer, JoinCMD: s.clusterJoinCMD}); err != nil { - return "", fmt.Errorf("%s: execute template: %v", op, err) - } - return buf.String(), nil -} - -//getSSHKey create and get the Hetzner Cloud SSH Key for the test -func (s *hcloudK8sSetup) getSSHKey(ctx context.Context) error { - const op = "hcloudK8sSetup/getSSHKey" - pubKey, privKey, err := makeSSHKeyPair() - if err != nil { - return err - } - sshKey, _, err := s.Hcloud.SSHKey.Create(ctx, hcloud.SSHKeyCreateOpts{ - Name: fmt.Sprintf("s-%s", s.TestIdentifier), - PublicKey: pubKey, - Labels: s.testLabels, - }) - if err != nil { - return fmt.Errorf("%s: creating ssh key: %v", op, err) - } - s.privKey = privKey - s.sshKey = sshKey - err = ioutil.WriteFile("ssh_key", []byte(s.privKey), 0600) - if err != nil { - return fmt.Errorf("%s: writing ssh key private key: %v", op, err) - } - return nil -} - -// makeSSHKeyPair generate a SSH key pair -func makeSSHKeyPair() (string, string, error) { - privateKey, err := rsa.GenerateKey(rand.Reader, 1024) - if err != nil { - return "", "", err - } - - // generate and write private key as PEM - var privKeyBuf strings.Builder - - privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)} - if err := pem.Encode(&privKeyBuf, privateKeyPEM); err != nil { - return "", "", err - } - - // generate and write public key - pub, err := ssh.NewPublicKey(&privateKey.PublicKey) - if err != nil { - return "", "", err - } - - var pubKeyBuf strings.Builder - pubKeyBuf.Write(ssh.MarshalAuthorizedKey(pub)) - - return pubKeyBuf.String(), privKeyBuf.String(), nil -} 
-func RunCommandOnServer(privKey string, server *hcloud.Server, command string) error { - return WithSSHSession(privKey, server.PublicNet.IPv4.IP.String(), func(session *ssh.Session) error { - if ok := os.Getenv("TEST_DEBUG_MODE"); ok != "" { - session.Stdout = os.Stdout - } - return session.Run(command) - }) -} -func RunCommandVisibleOnServer(privKey string, server *hcloud.Server, command string) error { - return WithSSHSession(privKey, server.PublicNet.IPv4.IP.String(), func(session *ssh.Session) error { - session.Stdout = os.Stdout - return session.Run(command) - }) -} - -func WithSSHSession(privKey string, host string, fn func(*ssh.Session) error) error { - signer, err := ssh.ParsePrivateKey([]byte(privKey)) - if err != nil { - return err - } - - client, err := ssh.Dial("tcp", net.JoinHostPort(host, "22"), &ssh.ClientConfig{ - User: "root", - Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), - Timeout: 1 * time.Second, - }) - if err != nil { - return err - } - - session, err := client.NewSession() - if err != nil { - return err - } - defer session.Close() - - return fn(session) -} diff --git a/e2etests/templates/cloudinit_k8s.txt.tpl b/e2etests/templates/cloudinit_k8s.txt.tpl deleted file mode 100644 index 3103cb2a..00000000 --- a/e2etests/templates/cloudinit_k8s.txt.tpl +++ /dev/null @@ -1,67 +0,0 @@ -#cloud-config -write_files: -- content: | - overlay - br_netfilter - path: /etc/modules-load.d/containerd.conf -- content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - net.ipv4.ip_forward = 1 - path: /etc/sysctl.d/k8s.conf -- content: | - apiVersion: kubeadm.k8s.io/v1beta2 - kind: ClusterConfiguration - kubernetesVersion: v{{.K8sVersion}} - networking: - podSubnet: "10.244.0.0/16" - path: /tmp/kubeadm-config.yaml -- content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--cloud-provider=external" - path: /etc/systemd/system/kubelet.service.d/20-hcloud.conf -- content: | - 
alias k="kubectl" - alias ksy="kubectl -n kube-system" - alias kgp="kubectl get pods" - alias kgs="kubectl get services" - alias cilog="cat /var/log/cloud-init-output.log" - export HCLOUD_TOKEN={{.HcloudToken}} - path: /root/.bashrc -runcmd: -- export HOME=/root -- modprobe overlay -- modprobe br_netfilter -- sysctl --system -- apt install -y apt-transport-https curl -- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - -- echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list -- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg -- echo "deb [arch=amd64 signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null -- apt update -- apt install -y kubectl={{.K8sVersion}}-00 kubeadm={{.K8sVersion}}-00 kubelet={{.K8sVersion}}-00 containerd.io -- systemctl daemon-reload -- mkdir -p /etc/containerd -- containerd config default | tee /etc/containerd/config.toml -- systemctl restart containerd -- systemctl restart kubelet -# Download and install latest hcloud cli release for easier debugging on host -- curl -s https://api.github.com/repos/hetznercloud/cli/releases/latest | grep browser_download_url | grep linux-amd64 | cut -d '"' -f 4 | wget -qi - -- tar xvzf hcloud-linux-amd64.tar.gz && cp hcloud /usr/bin/hcloud && chmod +x /usr/bin/hcloud -{{if .IsClusterServer}} -- kubeadm init --config /tmp/kubeadm-config.yaml -- mkdir -p /root/.kube -- cp -i /etc/kubernetes/admin.conf /root/.kube/config -- until KUBECONFIG=/root/.kube/config kubectl get node; do sleep 2;done -- KUBECONFIG=/root/.kube/config kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml -- KUBECONFIG=/root/.kube/config kubectl -n kube-system patch ds kube-flannel-ds --type json -p 
'[{"op":"add","path":"/spec/template/spec/tolerations/-","value":{"key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true","effect":"NoSchedule"}}]' -- KUBECONFIG=/root/.kube/config kubectl -n kube-system create secret generic hcloud-csi --from-literal=token={{.HcloudToken}} -- KUBECONFIG=/root/.kube/config kubectl -n kube-system create secret generic hcloud --from-literal=token={{.HcloudToken}} -- KUBECONFIG=/root/.kube/config kubectl apply -f https://raw.githubusercontent.com/hetznercloud/hcloud-cloud-controller-manager/master/deploy/ccm.yaml -- cd /root/ && curl -s --location https://dl.k8s.io/v{{.K8sVersion}}/kubernetes-test-linux-amd64.tar.gz | tar --strip-components=3 -zxf - kubernetes/test/bin/e2e.test kubernetes/test/bin/ginkgo -- KUBECONFIG=/root/.kube/config kubectl taint nodes --all node-role.kubernetes.io/master- -- kubeadm token create --print-join-command >> /root/join.txt -{{else}} -- {{.JoinCMD}} -- sleep 10 # to get the joining work -{{end}} diff --git a/e2etests/testing.go b/e2etests/testing.go deleted file mode 100644 index 99862ab5..00000000 --- a/e2etests/testing.go +++ /dev/null @@ -1,241 +0,0 @@ -package e2etests - -import ( - "context" - "fmt" - "math/rand" - "os" - "strings" - "sync" - "time" - - "k8s.io/client-go/tools/clientcmd" - - "github.com/hetznercloud/hcloud-go/hcloud" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" -) - -var rng *rand.Rand - -func init() { - rng = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -type TestCluster struct { - KeepOnFailure bool - setup *hcloudK8sSetup - k8sClient *kubernetes.Clientset - started bool - - mu sync.Mutex -} - -func (tc *TestCluster) initialize() error { - const op = "e2tests/TestCluster.initialize" - - if tc.started { - return nil - } - - fmt.Printf("%s: Starting Testsuite\n", op) - - isUsingGithubActions := os.Getenv("GITHUB_ACTIONS") - isUsingGitlabCI := 
os.Getenv("CI_JOB_ID") - testIdentifier := "" - if isUsingGithubActions == "true" { - testIdentifier = fmt.Sprintf("gh-%s-%d", os.Getenv("GITHUB_RUN_ID"), rng.Int()) - fmt.Printf("%s: Running in Github Action\n", op) - } - if isUsingGitlabCI != "" { - testIdentifier = fmt.Sprintf("gl-%s", isUsingGitlabCI) - fmt.Printf("%s: Running in Gitlab CI\n", op) - } - if testIdentifier == "" { - testIdentifier = fmt.Sprintf("local-%d", rng.Int()) - fmt.Printf("%s: Running local\n", op) - } - - k8sVersion := os.Getenv("K8S_VERSION") - if k8sVersion == "" { - k8sVersion = "k8s-1.18.9" - } - - k8sVersionsDetails := strings.Split(k8sVersion, "-") - if len(k8sVersionsDetails) != 2 { - return fmt.Errorf("%s: invalid k8s version: %v should be format -", op, k8sVersion) - } - - token := os.Getenv("HCLOUD_TOKEN") - if len(token) != 64 { - return fmt.Errorf("%s: No valid HCLOUD_TOKEN found", op) - } - tc.KeepOnFailure = os.Getenv("KEEP_SERVER_ON_FAILURE") == "yes" - - var additionalSSHKeys []*hcloud.SSHKey - - opts := []hcloud.ClientOption{ - hcloud.WithToken(token), - hcloud.WithApplication("hcloud-ccm-testsuite", "1.0"), - } - hcloudClient := hcloud.NewClient(opts...) 
- additionalSSHKeysIDOrName := os.Getenv("USE_SSH_KEYS") - if additionalSSHKeysIDOrName != "" { - idsOrNames := strings.Split(additionalSSHKeysIDOrName, ",") - for _, idOrName := range idsOrNames { - additionalSSHKey, _, err := hcloudClient.SSHKey.Get(context.Background(), idOrName) - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - additionalSSHKeys = append(additionalSSHKeys, additionalSSHKey) - } - } - - fmt.Printf("%s: Test against %s\n", op, k8sVersion) - - imageName := os.Getenv("CSI_IMAGE_NAME") - buildImage := false - if imageName == "" { - imageName = fmt.Sprintf("hcloud-csi:ci_%s", testIdentifier) - buildImage = true - } - if buildImage { - fmt.Printf("%s: Building image\n", op) - if err := runCmd("docker", []string{"build", "-t", imageName, "../"}, nil); err != nil { - return fmt.Errorf("%s: %v", op, err) - } - } - - fmt.Printf("%s: Saving image to disk\n", op) - if err := runCmd("docker", []string{"save", "--output", "ci-hcloud-csi-driver.tar", imageName}, nil); err != nil { - return fmt.Errorf("%s: %v", op, err) - } - - tc.setup = &hcloudK8sSetup{ - Hcloud: hcloudClient, - K8sDistribution: K8sDistribution(k8sVersionsDetails[0]), - K8sVersion: k8sVersionsDetails[1], - TestIdentifier: testIdentifier, - ImageName: imageName, - HcloudToken: token, - KeepOnFailure: tc.KeepOnFailure, - } - fmt.Printf("%s: Setting up test env\n", op) - - err := tc.setup.PrepareTestEnv(context.Background(), additionalSSHKeys) - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - - kubeconfigPath, err := tc.setup.PrepareK8s() - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - - config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath) - if err != nil { - return fmt.Errorf("%s: clientcmd.BuildConfigFromFlags: %s", op, err) - } - - tc.k8sClient, err = kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("%s: kubernetes.NewForConfig: %s", op, err) - } - - tc.started = true - return nil -} - -func (tc *TestCluster) Start() 
error { - const op = "e2etests/TestCluster.Start" - - tc.mu.Lock() - defer tc.mu.Unlock() - - if err := tc.initialize(); err != nil { - return fmt.Errorf("%s: %v", op, err) - } - if err := tc.ensureNodesReady(); err != nil { - return fmt.Errorf("%s: %v", op, err) - } - if err := tc.ensurePodsReady(); err != nil { - return fmt.Errorf("%s: %v", op, err) - } - return nil -} - -func (tc *TestCluster) Stop(testFailed bool) error { - const op = "e2etests/TestCluster.Stop" - - tc.mu.Lock() - defer tc.mu.Unlock() - - if !tc.started { - return nil - } - - if err := tc.setup.TearDown(testFailed); err != nil { - fmt.Printf("%s: Tear Down: %s", op, err) - } - return nil -} - -func (tc *TestCluster) ensureNodesReady() error { - const op = "e2etests/ensureNodesReady" - - err := wait.Poll(1*time.Second, 5*time.Minute, func() (bool, error) { - var totalNodes = len(tc.setup.WorkerNodes) + 1 // Number Worker Nodes + 1 Cluster Node - var readyNodes int - nodes, err := tc.k8sClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) - if err != nil { - return false, err - } - for _, node := range nodes.Items { - for _, cond := range node.Status.Conditions { - if cond.Type == corev1.NodeReady && cond.Status == corev1.ConditionTrue { - readyNodes++ - } - } - } - pendingNodes := totalNodes - readyNodes - fmt.Printf("Waiting for %d/%d nodes\n", pendingNodes, totalNodes) - return pendingNodes == 0, err - }) - - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - return nil -} - -func (tc *TestCluster) ensurePodsReady() error { - const op = "e2etests/ensurePodsReady" - - err := wait.Poll(1*time.Second, 10*time.Minute, func() (bool, error) { - pods, err := tc.k8sClient.CoreV1().Pods("kube-system").List(context.Background(), metav1.ListOptions{}) - if err != nil { - return false, err - } - totalPods := len(pods.Items) - - var readyPods int - for _, pod := range pods.Items { - for _, cond := range pod.Status.Conditions { - if cond.Type == corev1.PodReady && cond.Status 
== corev1.ConditionTrue { - readyPods++ - } - } - } - - pendingPods := totalPods - readyPods - fmt.Printf("Waiting for %d/%d pods\n", pendingPods, totalPods) - return pendingPods == 0, err - }) - - if err != nil { - return fmt.Errorf("%s: %s", op, err) - } - return nil -} diff --git a/hack/.gitignore b/hack/.gitignore new file mode 100644 index 00000000..f9d7355a --- /dev/null +++ b/hack/.gitignore @@ -0,0 +1,3 @@ +.ssh* +.kubeconfig* +.kube-test/ diff --git a/hack/dev-down.sh b/hack/dev-down.sh new file mode 100755 index 00000000..0fc75071 --- /dev/null +++ b/hack/dev-down.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -ue -o pipefail + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" +scope="${SCOPE:-dev}" +label="managedby=hack" + +if [[ "${ALL:-}" == "" ]]; then + label="$label,scope=$scope" + rm -f $SCRIPT_DIR/.ssh-$scope $SCRIPT_DIR/.kubeconfig-$scope +else + rm -f $SCRIPT_DIR/.ssh* $SCRIPT_DIR/.kubeconfig* +fi + +for instance in $(hcloud server list -o noheader -o columns=id -l $label); do + ( + hcloud server delete $instance + ) & +done + + +for key in $(hcloud ssh-key list -o noheader -o columns=name -l $label); do + ( + hcloud ssh-key delete $key + ) & +done + + +wait diff --git a/hack/dev-up.sh b/hack/dev-up.sh new file mode 100755 index 00000000..139ad34c --- /dev/null +++ b/hack/dev-up.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash +set -ueo pipefail + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +# Redirect all stdout to stderr. +{ + if ! hcloud version >/dev/null; then + echo 'ERROR: `hcloud` CLI not found, please install it and make it available on your $PATH' + exit 1 + fi + + if ! k3sup version >/dev/null; then + echo 'ERROR: `k3sup` not found, please install it and make it available on your $PATH' + exit 1 + fi + + if [[ "${HCLOUD_TOKEN:-}" == "" ]]; then + echo 'ERROR: please set $HCLOUD_TOKEN' + exit 1 + fi + + # We run a lot of subshells below for speed. 
If any encounter an error, we shut down the whole process group, pronto. + function error() { + echo 'Onoes, something went wrong! :( The output above might have some clues.' + kill 0 + } + + trap error ERR + + image_name=${IMAGE_NAME:-ubuntu-20.04} + instance_count=${INSTANCES:-1} + instance_type=${INSTANCE_TYPE:-cpx11} + location=${LOCATION:-fsn1} + ssh_keys=${SSH_KEYS:-} + channel=${K3S_CHANNEL:-stable} + + scope="${SCOPE:-dev}" + label="managedby=hack,scope=$scope" + ssh_private_key="$SCRIPT_DIR/.ssh-$scope" + k3s_opts=${K3S_OPTS:-"--kubelet-arg cloud-provider=external"} + k3s_server_opts=${K3S_SERVER_OPTS:-"--disable-cloud-controller --cluster-cidr 10.244.0.0/16"} + + scope_name=csi-driver-${scope} + + export KUBECONFIG="$SCRIPT_DIR/.kubeconfig-$scope" + + ssh_command="ssh -i $ssh_private_key -o StrictHostKeyChecking=off -o BatchMode=yes -o ConnectTimeout=5" + + # Generate SSH keys and upload publkey to Hetzner Cloud. + ( trap error ERR + [[ ! -f $ssh_private_key ]] && ssh-keygen -t ed25519 -f $ssh_private_key -C '' -N '' + [[ ! -f $ssh_private_key.pub ]] && ssh-keygen -y -f $ssh_private_key > $ssh_private_key.pub + if ! hcloud ssh-key describe $scope_name >/dev/null 2>&1; then + hcloud ssh-key create --label $label --name $scope_name --public-key-from-file $ssh_private_key.pub + fi + ) & + + for num in $(seq $instance_count); do + # Create server and initialize Kubernetes on it with k3sup. + ( trap error ERR + + server_name="$scope_name-$num" + + # Maybe cluster is already up and node is already there. 
+ if kubectl get node $server_name >/dev/null 2>&1; then + exit 0 + fi + + ip=$(hcloud server ip $server_name 2>/dev/null || true) + + if [[ -z "${ip:-}" ]]; then + # Wait for SSH key + until hcloud ssh-key describe $scope_name >/dev/null 2>&1; do sleep 1; done + + createcmd="hcloud server create --image $image_name --label $label --location $location --name $server_name --ssh-key=$scope_name --type $instance_type" + for key in $ssh_keys; do + createcmd+=" --ssh-key $key" + done + $createcmd + ip=$(hcloud server ip $server_name) + fi + + # Wait for SSH. + until [ "$($ssh_command root@$ip echo ok 2>/dev/null)" = "ok" ]; do + sleep 1 + done + + if [[ "$num" == "1" ]]; then + # First node is control plane. + k3sup install --print-config=false --ip $ip --k3s-channel $channel --k3s-extra-args "${k3s_server_opts} ${k3s_opts}" --local-path $KUBECONFIG --ssh-key $ssh_private_key + else + # All subsequent nodes are initialized as workers. + + # Can't go any further until control plane has bootstrapped a bit though. + until $ssh_command root@$(hcloud server ip $scope_name-1 || true) stat /etc/rancher/node/password >/dev/null 2>&1; do + sleep 1 + done + + k3sup join --server-ip $(hcloud server ip $scope_name-1) --ip $ip --k3s-channel $channel --k3s-extra-args "${k3s_opts}" --ssh-key $ssh_private_key + fi + ) & + + # Wait for this node to show up in the cluster. + ( trap error ERR; set +x + until kubectl wait --for=condition=Ready node/$scope_name-$num >/dev/null 2>&1; do sleep 1; done + echo $scope_name-$num is up and in cluster + ) & + done + + ( trap error ERR + # Control plane init tasks. + # This is running in parallel with the server init, above. + + # Wait for control plane to look alive. + until kubectl get nodes >/dev/null 2>&1; do sleep 1; done; + + # Install flannel. + ( trap error ERR + if ! 
kubectl get -n kube-system ds/kube-flannel-ds >/dev/null 2>&1; then +      kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml +      kubectl -n kube-system patch ds kube-flannel-ds --type json -p '[{"op":"add","path":"/spec/template/spec/tolerations/-","value":{"key":"node.cloudprovider.kubernetes.io/uninitialized","value":"true","effect":"NoSchedule"}}]' +    fi) & + +    # Create HCLOUD_TOKEN Secret for hcloud-cloud-controller-manager. +    ( trap error ERR +      if ! kubectl -n kube-system get secret hcloud >/dev/null 2>&1; then +        kubectl -n kube-system create secret generic hcloud --from-literal="token=$HCLOUD_TOKEN" +      fi) & + +    # Install hcloud-cloud-controller-manager. +    ( trap error ERR +      if ! kubectl get -n kube-system deploy/hcloud-cloud-controller-manager >/dev/null 2>&1; then +        kubectl apply -f https://raw.githubusercontent.com/hetznercloud/hcloud-cloud-controller-manager/master/deploy/ccm.yaml +      fi) & +    wait +  ) & + +  wait +  echo "Success - cluster fully initialized and ready, why not see for yourself?" +  echo '$ kubectl get nodes' +  kubectl get nodes +} >&2 + +echo "export KUBECONFIG=$KUBECONFIG" diff --git a/e2etests/templates/testdrivers/1.18.yml b/hack/e2e-storage-driver.yml similarity index 100% rename from e2etests/templates/testdrivers/1.18.yml rename to hack/e2e-storage-driver.yml diff --git a/hack/kustomization.yaml b/hack/kustomization.yaml new file mode 100644 index 00000000..70d3f0ec --- /dev/null +++ b/hack/kustomization.yaml @@ -0,0 +1,32 @@ +# This Kustomization is specifically designed for the Skaffold-powered dev environment.
+# $ eval $(hack/dev-up.sh) && skaffold dev + +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: + - ../deploy +patches: + - target: + kind: DaemonSet + name: hcloud-csi-node + patch: |- + - op: add + path: /spec/template/spec/containers/1/env/- + value: + name: LOG_LEVEL + value: info + - op: replace + path: /spec/template/spec/containers/1/env/2/valueFrom/secretKeyRef/name + value: hcloud + - target: + kind: StatefulSet + name: hcloud-csi-controller + patch: |- + - op: add + path: /spec/template/spec/containers/3/env/- + value: + name: LOG_LEVEL + value: info + - op: replace + path: /spec/template/spec/containers/3/env/3/valueFrom/secretKeyRef/name + value: hcloud diff --git a/hack/run-e2e-tests.sh b/hack/run-e2e-tests.sh new file mode 100755 index 00000000..e8ca1bef --- /dev/null +++ b/hack/run-e2e-tests.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +set -uex -o pipefail + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" + +kube_version=${KUBE_VERSION:-v1.21.3} + +testdriver="$SCRIPT_DIR/e2e-storage-driver.yml" + +image="k8s.gcr.io/conformance-amd64:$kube_version" + +docker run -v $testdriver:$testdriver -v $KUBECONFIG:$KUBECONFIG -e "KUBECONFIG=$KUBECONFIG" $image \ + /usr/local/bin/ginkgo -succinct \ + -focus='External.Storage.*(\[Feature:|\[Serial\])' \ + -flakeAttempts=2 \ + /usr/local/bin/e2e.test -- \ + -storage.testdriver=$testdriver + +docker run -v $testdriver:$testdriver -v $KUBECONFIG:$KUBECONFIG -e "KUBECONFIG=$KUBECONFIG" $image \ + /usr/local/bin/ginkgo -succinct \ + -nodes=3 \ + -focus='External.Storage' \ + -skip='\[Feature:|\[Disruptive\]|\[Serial\]' \ + -flakeAttempts=2 \ + /usr/local/bin/e2e.test -- \ + -storage.testdriver=$testdriver diff --git a/skaffold.yaml b/skaffold.yaml new file mode 100644 index 00000000..1b8f4482 --- /dev/null +++ b/skaffold.yaml @@ -0,0 +1,16 @@ +apiVersion: skaffold/v2beta19 +kind: Config +metadata: + name: csi-driver +build: + artifacts: + - image: 
hetznercloud/hcloud-csi-driver + docker: + dockerfile: Dockerfile + cacheFrom: + - hetznercloud/hcloud-csi-driver:buildcache + local: + useBuildkit: true +deploy: + kustomize: + paths: [hack/]