diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 237b8662d..f21467dbf 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -25,5 +25,8 @@ jobs:
       - name: Set netfilter conntrack max
         run: sudo sysctl -w net.netfilter.nf_conntrack_max=131072
 
+      - name: Run Cluster Class e2e tests
+        run: yes | GINKGO_FOCUS="\[Cluster-Class\]" make test-e2e
+
       - name: Run PR-Blocking e2e tests
         run: yes | GINKGO_FOCUS="\[PR-Blocking\]" make test-e2e
diff --git a/test/e2e/config/provider.yaml b/test/e2e/config/provider.yaml
index a8a4b8e79..410775944 100644
--- a/test/e2e/config/provider.yaml
+++ b/test/e2e/config/provider.yaml
@@ -75,6 +75,8 @@ providers:
     files:
       # Add a cluster template
      - sourcePath: "../data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template.yaml"
+      - sourcePath: "../data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart.yaml"
+      - sourcePath: "../data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template-topology.yaml"
       - sourcePath: "../../../metadata.yaml"
 variables:
   # default variables for the e2e test; those values could be overridden via env variables, thus
@@ -91,6 +93,7 @@ variables:
   CNI: "./data/cni/kindnet/kindnet.yaml"
   EXP_CLUSTER_RESOURCE_SET: "true"
   EXP_MACHINE_POOL: "true"
+  CLUSTER_TOPOLOGY: "true"
   KUBETEST_CONFIGURATION: "./data/kubetest/conformance.yaml"
   NODE_DRAIN_TIMEOUT: "60s"
   # NOTE: INIT_WITH_BINARY is used only by the clusterctl upgrade test to initialize the management cluster to be upgraded
diff --git a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/cluster-template-topology.yaml b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/cluster-template-topology.yaml
new file mode 100644
index 000000000..4b5980fab
--- /dev/null
+++ b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/cluster-template-topology.yaml
@@ -0,0 +1,83 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: ${CLUSTER_NAME}
+  namespace: ${NAMESPACE}
+spec:
+  clusterNetwork:
+    services:
+      cidrBlocks:
+        - 10.128.0.0/12
+    pods:
+      cidrBlocks:
+        - 192.168.0.0/16
+    serviceDomain: cluster.local
+  topology:
+    class: quickstart
+    version: ${KUBERNETES_VERSION}
+    controlPlane:
+      metadata: {}
+      replicas: 1
+    variables:
+      - name: bundleLookupBaseRegistry
+        value: "projects.registry.vmware.com/cluster_api_provider_bringyourownhost"
+      - name: controlPlaneIpAddr
+        value: ${CONTROL_PLANE_ENDPOINT_IP}
+      - name: kubeVipPodManifest
+        value: |
+          apiVersion: v1
+          kind: Pod
+          metadata:
+            creationTimestamp: null
+            name: kube-vip
+            namespace: kube-system
+          spec:
+            containers:
+            - args:
+              - manager
+              env:
+              - name: cp_enable
+                value: "true"
+              - name: vip_arp
+                value: "true"
+              - name: vip_leaderelection
+                value: "true"
+              - name: vip_address
+                value: ${CONTROL_PLANE_ENDPOINT_IP}
+              - name: vip_interface
+                value: "{{ .DefaultNetworkInterfaceName }}"
+              - name: vip_leaseduration
+                value: "15"
+              - name: vip_renewdeadline
+                value: "10"
+              - name: vip_retryperiod
+                value: "2"
+              image: ghcr.io/kube-vip/kube-vip:v0.4.1
+              imagePullPolicy: IfNotPresent
+              name: kube-vip
+              resources: {}
+              securityContext:
+                capabilities:
+                  add:
+                  - NET_ADMIN
+                  - NET_RAW
+              volumeMounts:
+              - mountPath: /etc/kubernetes/admin.conf
+                name: kubeconfig
+            hostNetwork: true
+            hostAliases:
+            - hostnames:
+              - kubernetes
+              ip: 127.0.0.1
+            volumes:
+            - hostPath:
+                path: /etc/kubernetes/admin.conf
+                type: FileOrCreate
+              name: kubeconfig
+          status: {}
+    workers:
+      machineDeployments:
+        - class: quickstart-worker
+          metadata: {}
+          name: md-0
+          replicas: 1
diff --git a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart-template.yaml b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/clusterclass-quickstart.yaml
similarity index 70%
rename from test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart-template.yaml
rename to test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/clusterclass-quickstart.yaml
index f5cfae281..94ff9e075 100644
--- a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart-template.yaml
+++ b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/docker/clusterclass-quickstart.yaml
@@ -1,37 +1,38 @@
 apiVersion: cluster.x-k8s.io/v1beta1
 kind: ClusterClass
 metadata:
-  name: ${CLUSTER_CLASS_NAME}
+  name: quickstart
+  namespace: ${NAMESPACE}
 spec:
   controlPlane:
     ref:
       apiVersion: controlplane.cluster.x-k8s.io/v1beta1
       kind: KubeadmControlPlaneTemplate
-      name: ${CLUSTER_CLASS_NAME}-control-plane
+      name: quickstart-control-plane
     machineInfrastructure:
       ref:
         kind: ByoMachineTemplate
         apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-        name: ${CLUSTER_CLASS_NAME}-control-plane-machine
+        name: quickstart-control-plane-machine
   infrastructure:
     ref:
       apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
       kind: ByoClusterTemplate
-      name: ${CLUSTER_CLASS_NAME}-cluster
+      name: quickstart-cluster
   workers:
     machineDeployments:
-      - class: ${CLUSTER_CLASS_NAME}-worker
+      - class: quickstart-worker
         template:
           bootstrap:
             ref:
               apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
               kind: KubeadmConfigTemplate
-              name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template
+              name: quickstart-worker-bootstrap-template
           infrastructure:
             ref:
               apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
               kind: ByoMachineTemplate
-              name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate
+              name: quickstart-worker-machinetemplate
   variables:
     - name: bundleLookupBaseRegistry
       required: true
@@ -92,35 +93,13 @@ spec:
           matchResources:
             controlPlane: true
 ---
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: ByoClusterTemplate
-metadata:
-  name: ${CLUSTER_CLASS_NAME}-cluster
-spec:
-  template:
-    spec: {}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: ByoMachineTemplate
-metadata:
-  name: ${CLUSTER_CLASS_NAME}-worker-machinetemplate
-  namespace: default
-spec:
-  template:
-    spec: {}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: ByoMachineTemplate
-metadata:
-  name: ${CLUSTER_CLASS_NAME}-control-plane-machine
-spec:
-  template:
-    spec: {}
----
 kind: KubeadmControlPlaneTemplate
 apiVersion: controlplane.cluster.x-k8s.io/v1beta1
 metadata:
-  name: ${CLUSTER_CLASS_NAME}-control-plane
+  labels:
+    nodepool: pool0
+  name: quickstart-control-plane
+  namespace: ${NAMESPACE}
 spec:
   template:
     spec:
@@ -142,9 +121,9 @@ spec:
         nodeRegistration:
          criSocket: /var/run/containerd/containerd.sock
          ignorePreflightErrors:
-          - Swap
-          - DirAvailable--etc-kubernetes-manifests
-          - FileAvailable--etc-kubernetes-kubelet.conf
+            - Swap
+            - DirAvailable--etc-kubernetes-manifests
+            - FileAvailable--etc-kubernetes-kubelet.conf
          kubeletExtraArgs:
            cgroup-driver: cgroupfs
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
@@ -152,18 +131,77 @@ spec:
        nodeRegistration:
          criSocket: /var/run/containerd/containerd.sock
          ignorePreflightErrors:
-          - Swap
+            - Swap
-          - DirAvailable--etc-kubernetes-manifests
-          - FileAvailable--etc-kubernetes-kubelet.conf
+            - DirAvailable--etc-kubernetes-manifests
+            - FileAvailable--etc-kubernetes-kubelet.conf
          kubeletExtraArgs:
            cgroup-driver: cgroupfs
            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
 ---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoMachineTemplate
+metadata:
+  name: quickstart-control-plane-machine
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      installerRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: K8sInstallerConfigTemplate
+        name: quickstart-control-plane-machine
+        namespace: "${NAMESPACE}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: K8sInstallerConfigTemplate
+metadata:
+  name: quickstart-control-plane-machine
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      bundleRepo: projects.registry.vmware.com/cluster_api_provider_bringyourownhost
+      bundleType: k8s
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoClusterTemplate
+metadata:
+  name: quickstart-cluster
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec: {}
+
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoMachineTemplate
+metadata:
+  name: quickstart-worker-machinetemplate
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      installerRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: K8sInstallerConfigTemplate
+        name: ${CLUSTER_NAME}-md-0
+        namespace: ${NAMESPACE}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: K8sInstallerConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      bundleRepo: projects.registry.vmware.com/cluster_api_provider_bringyourownhost
+      bundleType: k8s
+---
 apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
 kind: KubeadmConfigTemplate
 metadata:
-  name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template
-  namespace: default
+  name: quickstart-worker-bootstrap-template
+  namespace: ${NAMESPACE}
 spec:
   template:
     spec:
@@ -171,24 +209,4 @@ spec:
       nodeRegistration:
         kubeletExtraArgs:
           cgroup-driver: cgroupfs
-          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
----
-apiVersion: v1
-binaryData: null
-data: ${CNI_RESOURCES}
-kind: ConfigMap
-metadata:
-  name: cni-${CLUSTER_NAME}-crs-0
----
-apiVersion: addons.cluster.x-k8s.io/v1beta1
-kind: ClusterResourceSet
-metadata:
-  name: ${CLUSTER_NAME}-crs-0
-spec:
-  clusterSelector:
-    matchLabels:
-      cni: ${CLUSTER_NAME}-crs-0
-  resources:
-    - kind: ConfigMap
-      name: cni-${CLUSTER_NAME}-crs-0
-  strategy: ApplyOnce
+          eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
\ No newline at end of file
diff --git a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template-topology.yaml b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template-topology.yaml
index 879c7a3cd..06d60e0fe 100644
--- a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template-topology.yaml
+++ b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/cluster-template-topology.yaml
@@ -11,13 +11,13 @@ spec:
   clusterNetwork:
     services:
       cidrBlocks:
-        - "10.128.0.0/12"
+        - 10.128.0.0/12
     pods:
       cidrBlocks:
-        - "192.168.0.0/16"
-    serviceDomain: "cluster.local"
+        - 192.168.0.0/16
+    serviceDomain: cluster.local
   topology:
-    class: ${CLUSTER_CLASS_NAME}
+    class: quickstart
     version: ${KUBERNETES_VERSION}
     controlPlane:
       metadata: {}
@@ -26,7 +26,7 @@ spec:
       - name: bundleLookupBaseRegistry
         value: "projects.registry.vmware.com/cluster_api_provider_bringyourownhost"
       - name: controlPlaneIpAddr
-        value: ${CONTROL_PLANE_ENDPOINT}
+        value: ${CONTROL_PLANE_ENDPOINT_IP}
       - name: kubeVipPodManifest
         value: |
           apiVersion: v1
@@ -47,7 +47,7 @@ spec:
             - name: vip_leaderelection
               value: "true"
             - name: vip_address
-              value: ${CONTROL_PLANE_ENDPOINT}
+              value: ${CONTROL_PLANE_ENDPOINT_IP}
             - name: vip_interface
               value: "{{ .DefaultNetworkInterfaceName }}"
             - name: vip_leaseduration
@@ -81,7 +81,142 @@ spec:
           status: {}
     workers:
       machineDeployments:
-        - class: ${CLUSTER_CLASS_NAME}-worker
+        - class: quickstart-worker
          metadata: { }
          name: md-0
          replicas: 1
+---
+kind: KubeadmControlPlaneTemplate
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: quickstart-control-plane
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      kubeadmConfigSpec:
+        clusterConfiguration:
+          apiServer:
+            certSANs:
+              - localhost
+              - 127.0.0.1
+              - 0.0.0.0
+              - host.docker.internal
+          controllerManager:
+            extraArgs:
+              enable-hostpath-provisioner: "true"
+        files:
+          - owner: root:root
+            path: /etc/kubernetes/manifests/kube-vip.yaml
+        initConfiguration:
+          nodeRegistration:
+            criSocket: /var/run/containerd/containerd.sock
+            ignorePreflightErrors:
+              - Swap
+              - DirAvailable--etc-kubernetes-manifests
+              - FileAvailable--etc-kubernetes-kubelet.conf
+            kubeletExtraArgs:
+              cgroup-driver: cgroupfs
+              eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+        joinConfiguration:
+          nodeRegistration:
+            criSocket: /var/run/containerd/containerd.sock
+            ignorePreflightErrors:
+              - Swap
+              - DirAvailable--etc-kubernetes-manifests
+              - FileAvailable--etc-kubernetes-kubelet.conf
+            kubeletExtraArgs:
+              cgroup-driver: cgroupfs
+              eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoMachineTemplate
+metadata:
+  name: quickstart-control-plane-machine
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      installerRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: K8sInstallerConfigTemplate
+        name: quickstart-control-plane-machine
+        namespace: "${NAMESPACE}"
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: K8sInstallerConfigTemplate
+metadata:
+  name: quickstart-control-plane-machine
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      bundleRepo: projects.registry.vmware.com/cluster_api_provider_bringyourownhost
+      bundleType: k8s
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoClusterTemplate
+metadata:
+  name: quickstart-cluster
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec: {}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: quickstart-worker-bootstrap-template
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      joinConfiguration:
+        nodeRegistration:
+          kubeletExtraArgs:
+            cgroup-driver: cgroupfs
+            eviction-hard: nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: ByoMachineTemplate
+metadata:
+  name: quickstart-worker-machinetemplate
+  namespace: ${NAMESPACE}
+spec:
+  template:
+    spec:
+      installerRef:
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        kind: K8sInstallerConfigTemplate
+        name: ${CLUSTER_NAME}-md-0
+        namespace: ${NAMESPACE}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+kind: K8sInstallerConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      bundleRepo: projects.registry.vmware.com/cluster_api_provider_bringyourownhost
+      bundleType: k8s
+---
+apiVersion: v1
+binaryData: null
+data: ${CNI_RESOURCES}
+kind: ConfigMap
+metadata:
+  name: cni-${CLUSTER_NAME}-crs-0
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: ${CLUSTER_NAME}-crs-0
+spec:
+  clusterSelector:
+    matchLabels:
+      cni: ${CLUSTER_NAME}-crs-0
+  resources:
+    - kind: ConfigMap
+      name: cni-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
diff --git a/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart.yaml b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart.yaml
new file mode 100644
index 000000000..2af8ebafd
--- /dev/null
+++ b/test/e2e/data/infrastructure-provider-byoh/v1beta1/templates/e2e/clusterclass-quickstart.yaml
@@ -0,0 +1,94 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: ClusterClass
+metadata:
+  name: quickstart
+  namespace: ${NAMESPACE}
+spec:
+  controlPlane:
+    ref:
+      apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+      kind: KubeadmControlPlaneTemplate
+      name: quickstart-control-plane
+    machineInfrastructure:
+      ref:
+        kind: ByoMachineTemplate
+        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+        name: quickstart-control-plane-machine
+  infrastructure:
+    ref:
+      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+      kind: ByoClusterTemplate
+      name: quickstart-cluster
+  workers:
+    machineDeployments:
+      - class: quickstart-worker
+        template:
+          bootstrap:
+            ref:
+              apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+              kind: KubeadmConfigTemplate
+              name: quickstart-worker-bootstrap-template
+          infrastructure:
+            ref:
+              apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+              kind: ByoMachineTemplate
+              name: quickstart-worker-machinetemplate
+  variables:
+    - name: bundleLookupBaseRegistry
+      required: true
+      schema:
+        openAPIV3Schema:
+          type: string
+          default: "https://projects.registry.vmware.com/cluster_api_provider_bringyourownhost"
+    - name: controlPlaneIpAddr
+      required: true
+      schema:
+        openAPIV3Schema:
+          type: string
+    - name: kubeVipPodManifest
+      required: true
+      schema:
+        openAPIV3Schema:
+          description: kube-vip manifest for the control plane.
+          type: string
+  patches:
+    - name: bundleLookupBaseRegistry
+      description: "Sets the bundleLookupBaseRegistry used for the BYOCluster."
+      definitions:
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: ByoClusterTemplate
+            matchResources:
+              infrastructureCluster: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/bundleLookupBaseRegistry"
+              valueFrom:
+                variable: bundleLookupBaseRegistry
+    - name: controlPlaneEndpoint
+      description: "Sets control plane endpoint"
+      definitions:
+        - selector:
+            apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
+            kind: ByoClusterTemplate
+            matchResources:
+              infrastructureCluster: true
+          jsonPatches:
+            - op: add
+              path: "/spec/template/spec/controlPlaneEndpoint"
+              valueFrom:
+                template: |
+                  host: '{{ .controlPlaneIpAddr }}'
+                  port: 6443
+    - name: kubeVipEnabled
+      definitions:
+        - jsonPatches:
+            - op: add
+              path: /spec/template/spec/kubeadmConfigSpec/files/0/content
+              valueFrom:
+                variable: kubeVipPodManifest
+          selector:
+            apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+            kind: KubeadmControlPlaneTemplate
+            matchResources:
+              controlPlane: true
diff --git a/test/e2e/e2e_clusterclass_test.go b/test/e2e/e2e_clusterclass_test.go
new file mode 100644
index 000000000..8779698c3
--- /dev/null
+++ b/test/e2e/e2e_clusterclass_test.go
@@ -0,0 +1,178 @@
+// Copyright 2022 VMware, Inc. All Rights Reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+// nolint: testpackage
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
+)
+
+// This test creates a workload cluster from the quickstart ClusterClass.
+// It is meant to provide a first, fast signal to detect regressions; it is recommended to use it as a PR-blocking test.
+var _ = Describe("When BYOH joins existing cluster [Cluster-Class]", func() {
+
+	var (
+		ctx                 context.Context
+		specName            = "quick-start"
+		namespace           *corev1.Namespace
+		clusterName         string
+		cancelWatches       context.CancelFunc
+		clusterResources    *clusterctl.ApplyClusterTemplateAndWaitResult
+		dockerClient        *client.Client
+		err                 error
+		byohostContainerIDs []string
+		agentLogFile1       = "/tmp/host-agent1.log"
+		agentLogFile2       = "/tmp/host-agent2.log"
+		byoHostName1        = "byohost1"
+		byoHostName2        = "byohost2"
+	)
+
+	BeforeEach(func() {
+
+		ctx = context.TODO()
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+
+		Expect(e2eConfig).NotTo(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
+		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(bootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(os.MkdirAll(artifactFolder, 0755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
+
+		Expect(e2eConfig.Variables).To(HaveKey(KubernetesVersion))
+
+		// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
+		namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
+		clusterResources = new(clusterctl.ApplyClusterTemplateAndWaitResult)
+	})
+
+	It("Should create a workload cluster with single BYOH host", func() {
+		clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6))
+
+		dockerClient, err = client.NewClientWithOpts(client.FromEnv)
+		Expect(err).NotTo(HaveOccurred())
+
+		runner := ByoHostRunner{
+			Context:               ctx,
+			clusterConName:        clusterConName,
+			Namespace:             namespace.Name,
+			PathToHostAgentBinary: pathToHostAgentBinary,
+			DockerClient:          dockerClient,
+			NetworkInterface:      "kind",
+			bootstrapClusterProxy: bootstrapClusterProxy,
+			CommandArgs: map[string]string{
+				"--bootstrap-kubeconfig": "/bootstrap.conf",
+				"--namespace":            namespace.Name,
+				"--v":                    "1",
+			},
+		}
+
+		var output types.HijackedResponse
+		runner.ByoHostName = byoHostName1
+		runner.BootstrapKubeconfigData = generateBootstrapKubeconfig(runner.Context, bootstrapClusterProxy, clusterConName)
+		byohost, err := runner.SetupByoDockerHost()
+		Expect(err).NotTo(HaveOccurred())
+		output, byohostContainerID, err := runner.ExecByoDockerHost(byohost)
+		Expect(err).NotTo(HaveOccurred())
+		defer output.Close()
+		byohostContainerIDs = append(byohostContainerIDs, byohostContainerID)
+		f := WriteDockerLog(output, agentLogFile1)
+		defer func() {
+			deferredErr := f.Close()
+			if deferredErr != nil {
+				Showf("error closing file %s: %v", agentLogFile1, deferredErr)
+			}
+		}()
+
+		runner.ByoHostName = byoHostName2
+		runner.BootstrapKubeconfigData = generateBootstrapKubeconfig(runner.Context, bootstrapClusterProxy, clusterConName)
+		byohost, err = runner.SetupByoDockerHost()
+		Expect(err).NotTo(HaveOccurred())
+		output, byohostContainerID, err = runner.ExecByoDockerHost(byohost)
+		Expect(err).NotTo(HaveOccurred())
+		defer output.Close()
+		byohostContainerIDs = append(byohostContainerIDs, byohostContainerID)
+
+		// Read the host agent container's log in the background and write it to a file.
+		f = WriteDockerLog(output, agentLogFile2)
+		defer func() {
+			deferredErr := f.Close()
+			if deferredErr != nil {
+				Showf("error closing file %s: %v", agentLogFile2, deferredErr)
+			}
+		}()
+
+		setControlPlaneIP(context.Background(), dockerClient)
+		clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
+			ClusterProxy: bootstrapClusterProxy,
+			ConfigCluster: clusterctl.ConfigClusterInput{
+				LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
+				ClusterctlConfigPath:     clusterctlConfigPath,
+				KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
+				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+				Flavor:                   "topology",
+				Namespace:                namespace.Name,
+				ClusterName:              clusterName,
+				KubernetesVersion:        e2eConfig.GetVariable(KubernetesVersion),
+				ControlPlaneMachineCount: pointer.Int64Ptr(1),
+				WorkerMachineCount:       pointer.Int64Ptr(1),
+			},
+			WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
+			WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
+			WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
+		}, clusterResources)
+
+	})
+
+	JustAfterEach(func() {
+		if CurrentGinkgoTestDescription().Failed {
+			ShowInfo([]string{agentLogFile1, agentLogFile2})
+		}
+	})
+
+	AfterEach(func() {
+		// Dump all the resources in the spec namespace, then clean up the cluster object and the spec namespace itself.
+		dumpSpecResourcesAndCleanup(ctx, specName, bootstrapClusterProxy, artifactFolder, namespace, cancelWatches, clusterResources.Cluster, e2eConfig.GetIntervals, skipCleanup)
+
+		if dockerClient != nil && len(byohostContainerIDs) != 0 {
+			for _, byohostContainerID := range byohostContainerIDs {
+				err := dockerClient.ContainerStop(ctx, byohostContainerID, nil)
+				Expect(err).NotTo(HaveOccurred())
+
+				err = dockerClient.ContainerRemove(ctx, byohostContainerID, types.ContainerRemoveOptions{})
+				Expect(err).NotTo(HaveOccurred())
+			}
+		}
+
+		err := os.Remove(agentLogFile1)
+		if err != nil {
+			Showf("error removing file %s: %v", agentLogFile1, err)
+		}
+
+		err = os.Remove(agentLogFile2)
+		if err != nil {
+			Showf("error removing file %s: %v", agentLogFile2, err)
+		}
+
+		err = os.Remove(ReadByohControllerManagerLogShellFile)
+		if err != nil {
+			Showf("error removing file %s: %v", ReadByohControllerManagerLogShellFile, err)
+		}
+
+		err = os.Remove(ReadAllPodsShellFile)
+		if err != nil {
+			Showf("error removing file %s: %v", ReadAllPodsShellFile, err)
+		}
+	})
+})