diff --git a/.gitignore b/.gitignore
index 961e5e106501..5ba19e7300da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,7 +12,7 @@ out
 *.test

 # E2E test templates
-test/e2e/data/infrastructure-docker/*-template
+test/e2e/data/infrastructure-docker/cluster-template*.yaml

 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
new file mode 100644
index 000000000000..1d5d54608741
--- /dev/null
+++ b/test/e2e/clusterctl_upgrade.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/test/framework/bootstrap"
+	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
+)
+
+// ClusterctlUpgradeSpecInput is the input for ClusterctlUpgradeSpec.
+type ClusterctlUpgradeSpecInput struct {
+	E2EConfig             *clusterctl.E2EConfig
+	ClusterctlConfigPath  string
+	BootstrapClusterProxy framework.ClusterProxy
+	ArtifactFolder        string
+	SkipCleanup           bool
+}
+
+const (
+	ClusterctlUpgradeContract = "CLUSTERCTL_CONTRACT_UPGRADE_TO"
+)
+
+// ClusterctlUpgradeSpec implements a test that verifies clusterctl upgrade of a management cluster.
+func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpgradeSpecInput) {
+	var (
+		specName = "clusterctl-upgrade"
+		input    ClusterctlUpgradeSpecInput
+
+		managementClusterNamespace     *corev1.Namespace
+		managementClusterCancelWatches context.CancelFunc
+		managementClusterResources     *clusterctl.ApplyClusterTemplateAndWaitResult
+		managementClusterProxy         framework.ClusterProxy
+
+		testNamespace        *corev1.Namespace
+		testCancelWatches    context.CancelFunc
+		testClusterResources *clusterctl.ApplyClusterTemplateAndWaitResult
+	)
+
+	BeforeEach(func() {
+		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
+		input = inputGetter()
+		Expect(input.E2EConfig).ToNot(BeNil(), "Invalid argument. input.E2EConfig can't be nil when calling %s spec", specName)
+		Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling %s spec", specName)
+		Expect(input.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
+		Expect(os.MkdirAll(input.ArtifactFolder, 0755)).To(Succeed(), "Invalid argument. input.ArtifactFolder can't be created for %s spec", specName)
+		Expect(input.E2EConfig.Variables).To(HaveKey(KubernetesVersion))
+		Expect(input.E2EConfig.Variables).To(HaveKey(ClusterctlUpgradeContract))
+
+		// Set up a Namespace to host objects for this spec and create a watcher for the namespace events.
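+		// NOTE: setupSpecNamespace is a helper shared by the e2e specs in this package; it is assumed
+		// to create the namespace and return a CancelFunc that stops the event watcher during cleanup.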
+		managementClusterNamespace, managementClusterCancelWatches = setupSpecNamespace(context.TODO(), specName, input.BootstrapClusterProxy, input.ArtifactFolder)
+	})
+
+	It("Should create a management cluster and then upgrade all the providers", func() {
+
+		By("Creating a workload cluster to be used as a new management cluster")
+		// NOTE: given that the bootstrap cluster is shared by many tests, it is not possible to use it for testing clusterctl upgrades.
+		// So we are creating a workload cluster that will be used as a new management cluster, where older versions of the providers will be installed.
+
+		managementClusterResources = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{
+			ClusterProxy: input.BootstrapClusterProxy,
+			ConfigCluster: clusterctl.ConfigClusterInput{
+				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", input.BootstrapClusterProxy.GetName()),
+				ClusterctlConfigPath:     input.ClusterctlConfigPath,
+				KubeconfigPath:           input.BootstrapClusterProxy.GetKubeconfigPath(),
+				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+				Flavor:                   clusterctl.DefaultFlavor,
+				Namespace:                managementClusterNamespace.Name,
+				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
+				ControlPlaneMachineCount: pointer.Int64Ptr(1),
+				WorkerMachineCount:       pointer.Int64Ptr(1),
+			},
+			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
+			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
+			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		})
+
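+		// NOTE: ApplyClusterTemplateAndWait returns the Cluster and its related resources; at this point
+		// the cluster created above is still a plain workload cluster, and it only becomes a management
+		// cluster after clusterctl init runs below.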
+		By("Turning the workload cluster into a management cluster with older versions of providers")
+
+		// If the cluster is a DockerCluster, we need to load the controller images into its nodes.
+		// NB. this can also be achieved by changing the DockerMachine spec, but for the time being we are
+		// using this approach because it preserves a single source of truth for images: the e2e config.
+		// NB. the images for the official versions of the providers will be pulled from the internet, but the
+		// latest images must be built locally and loaded into the kind cluster.
+		cluster := managementClusterResources.Cluster
+		if cluster.Spec.InfrastructureRef.Kind == "DockerCluster" {
+			Expect(bootstrap.LoadImagesToKindCluster(context.TODO(), bootstrap.LoadImagesToKindClusterInput{
+				Name:   cluster.Name,
+				Images: input.E2EConfig.Images,
+			})).To(Succeed())
+		}
+
+		// Get a ClusterProxy so we can interact with the workload cluster
+		managementClusterProxy = input.BootstrapClusterProxy.GetWorkloadCluster(context.TODO(), cluster.Namespace, cluster.Name)
+
+		By("Initializing the workload cluster with older versions of providers")
+		clusterctl.InitManagementClusterAndWatchControllerLogs(context.TODO(), clusterctl.InitManagementClusterAndWatchControllerLogsInput{
+			ClusterProxy:            managementClusterProxy,
+			ClusterctlConfigPath:    input.ClusterctlConfigPath,
+			CoreProvider:            input.E2EConfig.GetProvidersWithOldestVersion(config.ClusterAPIProviderName)[0],
+			BootstrapProviders:      input.E2EConfig.GetProvidersWithOldestVersion(config.KubeadmBootstrapProviderName),
+			ControlPlaneProviders:   input.E2EConfig.GetProvidersWithOldestVersion(config.KubeadmControlPlaneProviderName),
+			InfrastructureProviders: input.E2EConfig.GetProvidersWithOldestVersion(input.E2EConfig.InfrastructureProviders()...),
+			LogFolder:               filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
+		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
+
+		By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
+
+		Byf("Creating a namespace for hosting the %s test workload cluster", specName)
+		testNamespace, testCancelWatches = framework.CreateNamespaceAndWatchEvents(context.TODO(), framework.CreateNamespaceAndWatchEventsInput{
+			Creator:   managementClusterProxy.GetClient(),
+			ClientSet: managementClusterProxy.GetClientSet(),
+			Name:      specName,
+			LogFolder: filepath.Join(input.ArtifactFolder, "clusters", "bootstrap"),
+		})
+
+		By("Creating a test workload cluster")
+		// NOTE: this workload cluster is used to check that the management cluster works as expected.
+
+		testClusterResources = clusterctl.ApplyClusterTemplateAndWait(context.TODO(), clusterctl.ApplyClusterTemplateAndWaitInput{
+			ClusterProxy: managementClusterProxy,
+			ConfigCluster: clusterctl.ConfigClusterInput{
+				LogFolder:                filepath.Join(input.ArtifactFolder, "clusters", managementClusterProxy.GetName()),
+				ClusterctlConfigPath:     input.ClusterctlConfigPath,
+				KubeconfigPath:           managementClusterProxy.GetKubeconfigPath(),
+				InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
+				Flavor:                   clusterctl.DefaultFlavor,
+				Namespace:                testNamespace.Name,
+				ClusterName:              fmt.Sprintf("%s-%s", specName, util.RandomString(6)),
+				KubernetesVersion:        input.E2EConfig.GetVariable(KubernetesVersion),
+				ControlPlaneMachineCount: pointer.Int64Ptr(1),
+				WorkerMachineCount:       pointer.Int64Ptr(1),
+			},
+			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
+			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
+			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		})
+
+		By("THE MANAGEMENT CLUSTER WITH OLDER VERSION OF PROVIDERS WORKS!")
+
+		By("Upgrading providers to the latest version available")
+		clusterctl.UpgradeManagementClusterAndWait(context.TODO(), clusterctl.UpgradeManagementClusterAndWaitInput{
+			ClusterctlConfigPath: input.ClusterctlConfigPath,
+			ClusterProxy:         managementClusterProxy,
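+			// NOTE: a management group is identified as <namespace>/<core provider name>; capi-system is
+			// the namespace where clusterctl installs the core provider by default.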
+			ManagementGroup:      fmt.Sprintf("capi-system/%s", config.ClusterAPIProviderName),
+			Contract:             input.E2EConfig.GetVariable(ClusterctlUpgradeContract),
+			LogFolder:            filepath.Join(input.ArtifactFolder, "clusters", cluster.Name),
+		}, input.E2EConfig.GetIntervals(specName, "wait-controllers")...)
+
+		By("THE MANAGEMENT CLUSTER WAS SUCCESSFULLY UPGRADED!")
+
+		testMachineDeployments := framework.GetMachineDeploymentsByCluster(context.TODO(), framework.GetMachineDeploymentsByClusterInput{
+			Lister:      managementClusterProxy.GetClient(),
+			ClusterName: testClusterResources.Cluster.Name,
+			Namespace:   testNamespace.Name,
+		})
+
+		framework.ScaleAndWaitMachineDeployment(context.TODO(), framework.ScaleAndWaitMachineDeploymentInput{
+			ClusterProxy:              managementClusterProxy,
+			Cluster:                   testClusterResources.Cluster,
+			MachineDeployment:         testMachineDeployments[0],
+			Replicas:                  2,
+			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		})
+
+		By("THE UPGRADED MANAGEMENT CLUSTER WORKS!")
+
+		By("PASSED!")
+	})
+
+	AfterEach(func() {
+		if testNamespace != nil {
+			// Dump all the logs from the workload cluster before deleting it.
+			managementClusterProxy.CollectWorkloadClusterLogs(ctx, testNamespace.Name, testClusterResources.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", testClusterResources.Cluster.Name, "machines"))
+
+			// Dump all Cluster API related resources to artifacts before pivoting back.
+			framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
+				Lister:    managementClusterProxy.GetClient(),
+				Namespace: managementClusterNamespace.Name,
+				LogPath:   filepath.Join(input.ArtifactFolder, "clusters", managementClusterResources.Cluster.Name, "resources"),
+			})
+
+			if !input.SkipCleanup {
+				Byf("Deleting cluster %s/%s", testNamespace.Name, testClusterResources.Cluster.Name)
+				framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
+					Client:    managementClusterProxy.GetClient(),
+					Namespace: testNamespace.Name,
+				}, input.E2EConfig.GetIntervals(specName, "wait-delete-cluster")...)
+
+				Byf("Deleting namespace used for hosting the %q test", specName)
+				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
+					Deleter: managementClusterProxy.GetClient(),
+					Name:    testNamespace.Name,
+				})
+			}
+			testCancelWatches()
+		}
+
+		// Dump all the resources in the spec namespace, then clean up the cluster object and the spec namespace itself.
+		dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, managementClusterNamespace, managementClusterCancelWatches, managementClusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup)
+	})
+}
diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go
new file mode 100644
index 000000000000..6ef92109a8a9
--- /dev/null
+++ b/test/e2e/clusterctl_upgrade_test.go
@@ -0,0 +1,39 @@
+// +build e2e
+
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"context"
+
+	. "github.com/onsi/ginkgo"
+)
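+
+// NOTE: e2eConfig, clusterctlConfigPath, bootstrapClusterProxy, artifactFolder and skipCleanup are
+// package-level variables, assumed to be initialized by the e2e suite setup before this spec runs.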
"github.com/onsi/ginkgo" +) + +var _ = Describe("When testing clusterctl upgrades", func() { + + ClusterctlUpgradeSpec(context.TODO(), func() ClusterctlUpgradeSpecInput { + return ClusterctlUpgradeSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + } + }) + +}) diff --git a/test/e2e/config/docker.yaml b/test/e2e/config/docker.yaml index 4d90735b403a..826d2454ea96 100644 --- a/test/e2e/config/docker.yaml +++ b/test/e2e/config/docker.yaml @@ -29,8 +29,13 @@ providers: - name: cluster-api type: CoreProvider versions: - - name: v0.3.0 - # Use manifest from source files + - name: v0.3.10 # latest published release + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/core-components.yaml" + type: "url" + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + - name: v0.3.11 # next; use manifest from source files value: ../../../config replacements: - old: --metrics-addr=127.0.0.1:8080 @@ -39,8 +44,13 @@ providers: - name: kubeadm type: BootstrapProvider versions: - - name: v0.3.0 - # Use manifest from source files + - name: v0.3.10 # latest published release + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/bootstrap-components.yaml" + type: "url" + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + - name: v0.3.11 # next; use manifest from source files value: ../../../bootstrap/kubeadm/config replacements: - old: --metrics-addr=127.0.0.1:8080 @@ -49,8 +59,13 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: - - name: v0.3.0 - # Use manifest from source files + - name: v0.3.10 # latest published release + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/control-plane-components.yaml" + type: "url" + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + - name: v0.3.11 # next; use manifest from source files value: ../../../controlplane/kubeadm/config replacements: - old: --metrics-addr=127.0.0.1:8080 @@ -59,8 +74,13 @@ providers: - name: docker type: InfrastructureProvider versions: - - name: v0.3.0 - # Use manifest from source files + - name: v0.3.10 # latest published release + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.3.10/infrastructure-components-development.yaml" + type: "url" + replacements: + - old: --metrics-addr=127.0.0.1:8080 + new: --metrics-addr=:8080 + - name: v0.3.11 # next; use manifest from source files value: ../../../test/infrastructure/docker/config replacements: - old: --metrics-addr=127.0.0.1:8080 @@ -85,6 +105,7 @@ variables: CNI: "./data/cni/kindnet/kindnet.yaml" EXP_CLUSTER_RESOURCE_SET: "true" EXP_MACHINE_POOL: "true" + CLUSTERCTL_CONTRACT_UPGRADE_TO: "v1alpha3" intervals: default/wait-controllers: ["3m", "10s"] diff --git a/test/framework/clusterctl/client.go b/test/framework/clusterctl/client.go index 2e5cd0741355..dc5c0a274b86 100644 --- a/test/framework/clusterctl/client.go +++ b/test/framework/clusterctl/client.go @@ -82,6 +82,38 @@ func Init(ctx context.Context, input InitInput) { Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl init") } +// UpgradeInput is the input for Upgrade. 
+// UpgradeInput is the input for Upgrade.
+type UpgradeInput struct {
+	LogFolder            string
+	ClusterctlConfigPath string
+	KubeconfigPath       string
+	ManagementGroup      string
+	Contract             string
+}
+
+// Upgrade calls clusterctl upgrade apply with the list of providers defined in the local repository.
+func Upgrade(ctx context.Context, input UpgradeInput) {
+	log.Logf("clusterctl upgrade apply --management-group %s --contract %s",
+		input.ManagementGroup,
+		input.Contract,
+	)
+
+	upgradeOpt := clusterctlclient.ApplyUpgradeOptions{
+		Kubeconfig: clusterctlclient.Kubeconfig{
+			Path:    input.KubeconfigPath,
+			Context: "",
+		},
+		ManagementGroup: input.ManagementGroup,
+		Contract:        input.Contract,
+	}
+
+	clusterctlClient, log := getClusterctlClientWithLogger(input.ClusterctlConfigPath, "clusterctl-upgrade.log", input.LogFolder)
+	defer log.Close()
+
+	err := clusterctlClient.ApplyUpgrade(upgradeOpt)
+	Expect(err).ToNot(HaveOccurred(), "failed to run clusterctl upgrade")
+}
+
 // ConfigClusterInput is the input for ConfigCluster.
 type ConfigClusterInput struct {
 	LogFolder            string
diff --git a/test/framework/clusterctl/clusterctl_helpers.go b/test/framework/clusterctl/clusterctl_helpers.go
index 3d5b46fe291e..e5b9eb6c7852 100644
--- a/test/framework/clusterctl/clusterctl_helpers.go
+++ b/test/framework/clusterctl/clusterctl_helpers.go
@@ -36,6 +36,9 @@ import (
 type InitManagementClusterAndWatchControllerLogsInput struct {
 	ClusterProxy             framework.ClusterProxy
 	ClusterctlConfigPath     string
+	CoreProvider             string
+	BootstrapProviders       []string
+	ControlPlaneProviders    []string
 	InfrastructureProviders  []string
 	LogFolder                string
 	DisableMetricsCollection bool
@@ -51,6 +54,16 @@ func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input Init
 	Expect(input.InfrastructureProviders).ToNot(BeEmpty(), "Invalid argument. input.InfrastructureProviders can't be empty when calling InitManagementClusterAndWatchControllerLogs")
 	Expect(os.MkdirAll(input.LogFolder, 0755)).To(Succeed(), "Invalid argument. input.LogFolder can't be created for InitManagementClusterAndWatchControllerLogs")

+	if input.CoreProvider == "" {
+		input.CoreProvider = config.ClusterAPIProviderName
+	}
+	if len(input.BootstrapProviders) == 0 {
+		input.BootstrapProviders = []string{config.KubeadmBootstrapProviderName}
+	}
+	if len(input.ControlPlaneProviders) == 0 {
+		input.ControlPlaneProviders = []string{config.KubeadmControlPlaneProviderName}
+	}
+
 	client := input.ClusterProxy.GetClient()
 	controllersDeployments := framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
 		Lister: client,
@@ -62,9 +75,9 @@ func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input Init
 		// pass the clusterctl config file that points to the local provider repository created for this test
 		ClusterctlConfigPath: input.ClusterctlConfigPath,
 		// setup the desired list of providers for a single-tenant management cluster
-		CoreProvider:            config.ClusterAPIProviderName,
-		BootstrapProviders:      []string{config.KubeadmBootstrapProviderName},
-		ControlPlaneProviders:   []string{config.KubeadmControlPlaneProviderName},
+		CoreProvider:            input.CoreProvider,
+		BootstrapProviders:      input.BootstrapProviders,
+		ControlPlaneProviders:   input.ControlPlaneProviders,
 		InfrastructureProviders: input.InfrastructureProviders,
 		// setup clusterctl logs folder
 		LogFolder: input.LogFolder,
@@ -102,6 +115,62 @@ func InitManagementClusterAndWatchControllerLogs(ctx context.Context, input Init
 	}
 }

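+// Example usage (a sketch; the names and intervals are illustrative):
+//
+//	UpgradeManagementClusterAndWait(ctx, UpgradeManagementClusterAndWaitInput{
+//		ClusterProxy:         managementClusterProxy,
+//		ClusterctlConfigPath: clusterctlConfigPath,
+//		ManagementGroup:      "capi-system/cluster-api",
+//		Contract:             "v1alpha3",
+//		LogFolder:            logFolder,
+//	}, e2eConfig.GetIntervals("clusterctl-upgrade", "wait-controllers")...)
+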
+// UpgradeManagementClusterAndWaitInput is the input type for UpgradeManagementClusterAndWait.
+type UpgradeManagementClusterAndWaitInput struct {
+	ClusterProxy         framework.ClusterProxy
+	ClusterctlConfigPath string
+	ManagementGroup      string
+	Contract             string
+	LogFolder            string
+}
+
+// UpgradeManagementClusterAndWait upgrades the providers in a management cluster using clusterctl, and waits for the controllers to be ready.
+func UpgradeManagementClusterAndWait(ctx context.Context, input UpgradeManagementClusterAndWaitInput, intervals ...interface{}) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for UpgradeManagementClusterAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling UpgradeManagementClusterAndWait")
+	Expect(input.ClusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. input.ClusterctlConfigPath must be an existing file when calling UpgradeManagementClusterAndWait")
+	Expect(input.ManagementGroup).ToNot(BeEmpty(), "Invalid argument. input.ManagementGroup can't be empty when calling UpgradeManagementClusterAndWait")
+	Expect(input.Contract).ToNot(BeEmpty(), "Invalid argument. input.Contract can't be empty when calling UpgradeManagementClusterAndWait")
+	Expect(os.MkdirAll(input.LogFolder, 0755)).To(Succeed(), "Invalid argument. input.LogFolder can't be created for UpgradeManagementClusterAndWait")
+
+	Upgrade(ctx, UpgradeInput{
+		ClusterctlConfigPath: input.ClusterctlConfigPath,
+		KubeconfigPath:       input.ClusterProxy.GetKubeconfigPath(),
+		ManagementGroup:      input.ManagementGroup,
+		Contract:             input.Contract,
+		LogFolder:            input.LogFolder,
+	})
+
+	client := input.ClusterProxy.GetClient()
+
+	log.Logf("Waiting for provider controllers to be running")
+	controllersDeployments := framework.GetControllerDeployments(ctx, framework.GetControllerDeploymentsInput{
+		Lister: client,
+	})
+	Expect(controllersDeployments).ToNot(BeEmpty(), "The list of controller deployments should not be empty")
+	for _, deployment := range controllersDeployments {
+		framework.WaitForDeploymentsAvailable(ctx, framework.WaitForDeploymentsAvailableInput{
+			Getter:     client,
+			Deployment: deployment,
+		}, intervals...)
+
+		// Start streaming logs from all controller providers
+		framework.WatchDeploymentLogs(ctx, framework.WatchDeploymentLogsInput{
+			GetLister:  client,
+			ClientSet:  input.ClusterProxy.GetClientSet(),
+			Deployment: deployment,
+			LogPath:    filepath.Join(input.LogFolder, "controllers"),
+		})
+
+		framework.WatchPodMetrics(ctx, framework.WatchPodMetricsInput{
+			GetLister:   client,
+			ClientSet:   input.ClusterProxy.GetClientSet(),
+			Deployment:  deployment,
+			MetricsPath: filepath.Join(input.LogFolder, "controllers"),
+		})
+	}
+}
+
 // ApplyClusterTemplateAndWaitInput is the input type for ApplyClusterTemplateAndWait.
 type ApplyClusterTemplateAndWaitInput struct {
 	ClusterProxy                 framework.ClusterProxy
@@ -127,8 +196,8 @@ func ApplyClusterTemplateAndWait(ctx context.Context, input ApplyClusterTemplate
 	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyClusterTemplateAndWait")

-	log.Logf("Creating the workload cluster with name %q using the %q template (Kubernetes %s, %d control-plane machines, %d worker machines)",
-		input.ConfigCluster.ClusterName, valueOrDefault(input.ConfigCluster.Flavor), input.ConfigCluster.KubernetesVersion, *input.ConfigCluster.ControlPlaneMachineCount, *input.ConfigCluster.WorkerMachineCount)
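+	// NOTE: the machine counts are no longer dereferenced because they may be nil; %v prints a
+	// possibly-nil pointer without panicking (assumption: callers may rely on template defaults).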
+	log.Logf("Creating the workload cluster with name %q using the %q template (Kubernetes %s, %v control-plane machines, %v worker machines)",
+		input.ConfigCluster.ClusterName, valueOrDefault(input.ConfigCluster.Flavor), input.ConfigCluster.KubernetesVersion, input.ConfigCluster.ControlPlaneMachineCount, input.ConfigCluster.WorkerMachineCount)

 	log.Logf("Getting the cluster template yaml")
 	workloadClusterTemplate := ConfigCluster(ctx, ConfigClusterInput{
diff --git a/test/framework/clusterctl/e2e_config.go b/test/framework/clusterctl/e2e_config.go
index bfbe516fd532..f0b21fd4cf34 100644
--- a/test/framework/clusterctl/e2e_config.go
+++ b/test/framework/clusterctl/e2e_config.go
@@ -23,6 +23,7 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
+	"sort"
 	"strconv"
 	"time"

@@ -399,3 +400,34 @@ func (c *E2EConfig) GetInt32PtrVariable(varName string) *int32 {
 	Expect(err).NotTo(HaveOccurred())
 	return pointer.Int32Ptr(int32(wCount))
 }
+
+// GetProviderVersions returns the list of versions defined for a provider, sorted by semantic version.
+func (c *E2EConfig) GetProviderVersions(provider string) []string {
+	versions := []string{}
+	for _, p := range c.Providers {
+		if p.Name == provider {
+			for _, v := range p.Versions {
+				versions = append(versions, v.Name)
+			}
+		}
+	}
+
+	sort.Slice(versions, func(i, j int) bool {
+		// NOTE: ignoring errors because the validity of the format is ensured by Validation
+		vI, _ := version.ParseSemantic(versions[i])
+		vJ, _ := version.ParseSemantic(versions[j])
+		return vI.LessThan(vJ)
+	})
+	return versions
+}
+
+// GetProvidersWithOldestVersion returns the given providers in the "name:version" form, each one paired with the oldest version defined in the e2e config.
+func (c *E2EConfig) GetProvidersWithOldestVersion(providers ...string) []string {
+	ret := make([]string, 0, len(providers))
+	for _, p := range providers {
+		versions := c.GetProviderVersions(p)
+		if len(versions) > 0 {
+			ret = append(ret, fmt.Sprintf("%s:%s", p, versions[0]))
+		}
+	}
+	return ret
+}
diff --git a/test/framework/clusterctl/repository.go b/test/framework/clusterctl/repository.go
index 74c4e748ce24..9f4ee9201826 100644
--- a/test/framework/clusterctl/repository.go
+++ b/test/framework/clusterctl/repository.go
@@ -71,10 +71,9 @@ func CreateRepository(ctx context.Context, input CreateRepositoryInput) string {

 	providers := []providerConfig{}
 	for _, provider := range input.E2EConfig.Providers {
-		providerURL := ""
+		providerLabel := clusterctlv1.ManifestLabel(provider.Name, clusterctlv1.ProviderType(provider.Type))
+		providerURL := filepath.Join(input.RepositoryFolder, providerLabel, "latest", "components.yaml")
 		for _, version := range provider.Versions {
-			providerLabel := clusterctlv1.ManifestLabel(provider.Name, clusterctlv1.ProviderType(provider.Type))
-
 			generator := framework.ComponentGeneratorForComponentSource(version)
 			manifest, err := generator.Manifests(ctx)
 			Expect(err).ToNot(HaveOccurred(), "Failed to generate the manifest for %q / %q", providerLabel, version.Name)
@@ -85,8 +84,13 @@
 			filePath := filepath.Join(sourcePath, "components.yaml")
 			Expect(ioutil.WriteFile(filePath, manifest, 0600)).To(Succeed(), "Failed to write manifest in the clusterctl local repository for %q / %q", providerLabel, version.Name)

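+			// The layout below mirrors a clusterctl local repository, i.e.
+			// <RepositoryFolder>/<providerLabel>/<version>/components.yaml, while the providerURL
+			// defined above points at the "latest" entry of that repository.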
-			if providerURL == "" {
-				providerURL = filePath
+			destinationPath := filepath.Join(input.RepositoryFolder, providerLabel, version.Name, "components.yaml")
+			for _, file := range provider.Files {
+				data, err := ioutil.ReadFile(file.SourcePath)
+				Expect(err).ToNot(HaveOccurred(), "Failed to read file %q / %q", provider.Name, file.SourcePath)
+
+				destinationFile := filepath.Join(filepath.Dir(destinationPath), file.TargetName)
+				Expect(ioutil.WriteFile(destinationFile, data, 0600)).To(Succeed(), "Failed to write clusterctl local repository file %q / %q", provider.Name, file.TargetName)
 			}
 		}
 		providers = append(providers, providerConfig{
@@ -105,7 +109,7 @@
 			Expect(err).ToNot(HaveOccurred(), "Failed to apply transformation func template %q", file)
 		}

-		destinationFile := filepath.Join(filepath.Dir(providerURL), file.TargetName)
+		destinationFile := filepath.Join(input.RepositoryFolder, providerLabel, file.TargetName)
 		Expect(ioutil.WriteFile(destinationFile, data, 0600)).To(Succeed(), "Failed to write clusterctl local repository file %q / %q", provider.Name, file.TargetName)
 	}
 }