From 5c7b5ed2f8f1013c6880ea97e7754a6788d06c3e Mon Sep 17 00:00:00 2001
From: Furkat Gofurov
Date: Wed, 9 Aug 2023 16:58:10 +0300
Subject: [PATCH] Fetch machine CRD GVK and pass it to unstructured instead of
 hardcoding it

Signed-off-by: Furkat Gofurov
---
 test/e2e/clusterctl_upgrade.go      | 38 +++++++++++++++++++++++++----
 test/e2e/clusterctl_upgrade_test.go | 12 ++-------
 2 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/test/e2e/clusterctl_upgrade.go b/test/e2e/clusterctl_upgrade.go
index 71bf3da0bf32..a781cf30dee2 100644
--- a/test/e2e/clusterctl_upgrade.go
+++ b/test/e2e/clusterctl_upgrade.go
@@ -29,9 +29,11 @@ import (
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/discovery"
 	"k8s.io/klog/v2"
@@ -334,6 +336,11 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 
 		By("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF PROVIDERS IS UP&RUNNING!")
 
+		machineCRD := &apiextensionsv1.CustomResourceDefinition{}
+		if err := managementClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "machines.cluster.x-k8s.io"}, machineCRD); err != nil {
+			Expect(err).ToNot(HaveOccurred(), "failed to retrieve a machine CRD")
+		}
+
 		Byf("Creating a namespace for hosting the %s test workload cluster", specName)
 		testNamespace, testCancelWatches = framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
 			Creator:   managementClusterProxy.GetClient(),
@@ -387,13 +394,34 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 			input.PreWaitForCluster(managementClusterProxy, testNamespace.Name, workLoadClusterName)
 		}
 
+		// Build the GroupVersionKind for Machine lists from the fetched CRD.
+		machineListGVK := schema.GroupVersionKind{
+			Group: machineCRD.Spec.Group,
+			Kind:  machineCRD.Spec.Names.ListKind,
+		}
+
+		// Pick the storage version of the CRD.
+		for _, version := range machineCRD.Spec.Versions {
+			if version.Storage {
+				machineListGVK.Version = version.Name
+				break
+			}
+		}
+
 		By("Waiting for the machines to exist")
 		Eventually(func() (int64, error) {
 			var n int64
-			machineList := &clusterv1alpha3.MachineList{}
-			if err := managementClusterProxy.GetClient().List(ctx, machineList, client.InNamespace(testNamespace.Name), client.MatchingLabels{clusterv1.ClusterNameLabel: workLoadClusterName}); err == nil {
-				for _, machine := range machineList.Items {
-					if machine.Status.NodeRef != nil {
+			machineList := &unstructured.UnstructuredList{}
+			machineList.SetGroupVersionKind(machineListGVK)
+			if err := managementClusterProxy.GetClient().List(
+				ctx,
+				machineList,
+				client.InNamespace(testNamespace.Name),
+				client.MatchingLabels{clusterv1.ClusterNameLabel: workLoadClusterName},
+			); err == nil {
+				for _, m := range machineList.Items {
+					_, found, err := unstructured.NestedMap(m.Object, "status", "nodeRef")
+					if err == nil && found {
 						n++
 					}
 				}
@@ -411,7 +439,7 @@ func ClusterctlUpgradeSpec(ctx context.Context, inputGetter func() ClusterctlUpg
 		// Get the workloadCluster before the management cluster is upgraded to make sure that the upgrade did not trigger
 		// any unexpected rollouts.
 		preUpgradeMachineList := &unstructured.UnstructuredList{}
-		preUpgradeMachineList.SetGroupVersionKind(clusterv1alpha3.GroupVersion.WithKind("MachineList"))
+		preUpgradeMachineList.SetGroupVersionKind(machineListGVK)
 		err = managementClusterProxy.GetClient().List(
 			ctx,
 			preUpgradeMachineList,
diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go
index 8ad7929f221d..e61680076feb 100644
--- a/test/e2e/clusterctl_upgrade_test.go
+++ b/test/e2e/clusterctl_upgrade_test.go
@@ -82,11 +82,7 @@ var _ = Describe("When testing clusterctl upgrades (v1.4=>current)", func() {
 				InitWithBootstrapProviders:      []string{"kubeadm:v1.4.5"},
 				InitWithControlPlaneProviders:   []string{"kubeadm:v1.4.5"},
 				InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
-				// We have to set this to an empty array as clusterctl v1.4 doesn't support
-				// runtime extension providers. If we don't do this the test will automatically
-				// try to deploy the latest version of our test-extension from docker.yaml.
-				InitWithRuntimeExtensionProviders: []string{},
-				InitWithProvidersContract:         "v1beta1",
+				InitWithProvidersContract: "v1beta1",
 				// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases`.
 				InitWithKubernetesVersion: "v1.27.3",
 				WorkloadKubernetesVersion: "v1.27.3",
@@ -123,11 +119,7 @@ var _ = Describe("When testing clusterctl upgrades using ClusterClass (v1.4=>cur
 				InitWithBootstrapProviders:      []string{"kubeadm:v1.4.5"},
 				InitWithControlPlaneProviders:   []string{"kubeadm:v1.4.5"},
 				InitWithInfrastructureProviders: []string{"docker:v1.4.5"},
-				// We have to set this to an empty array as clusterctl v1.4 doesn't support
-				// runtime extension providers. If we don't do this the test will automatically
-				// try to deploy the latest version of our test-extension from docker.yaml.
-				InitWithRuntimeExtensionProviders: []string{},
-				InitWithProvidersContract:         "v1beta1",
+				InitWithProvidersContract: "v1beta1",
 				// NOTE: If this version is changed here the image and SHA must also be updated in all DockerMachineTemplates in `test/e2e/data/infrastructure-docker/v1.4/bases`.
 				InitWithKubernetesVersion: "v1.27.3",
 				WorkloadKubernetesVersion: "v1.27.3",
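
Note (sketch, not part of the patch): the change above replaces a hardcoded
clusterv1alpha3 GVK with one derived at runtime from the Machine CRD: read the
CRD, take spec.group and spec.names.listKind, and pick the storage version.
The minimal Go sketch below shows the same pattern as two standalone helpers.
It assumes a controller-runtime client; machineListGVKFromCRD and
countMachinesWithNode are illustrative names, not functions from this patch.

package e2esketch

import (
	"context"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// machineListGVKFromCRD (illustrative name) fetches the Machine CRD and builds
// the GroupVersionKind of its list type, using the CRD's storage version so
// the client asks for the version the API server persists.
func machineListGVKFromCRD(ctx context.Context, c client.Client) (schema.GroupVersionKind, error) {
	crd := &apiextensionsv1.CustomResourceDefinition{}
	if err := c.Get(ctx, client.ObjectKey{Name: "machines.cluster.x-k8s.io"}, crd); err != nil {
		return schema.GroupVersionKind{}, fmt.Errorf("failed to retrieve Machine CRD: %w", err)
	}
	gvk := schema.GroupVersionKind{
		Group: crd.Spec.Group,
		Kind:  crd.Spec.Names.ListKind,
	}
	for _, version := range crd.Spec.Versions {
		if version.Storage { // exactly one version is marked as the storage version
			gvk.Version = version.Name
			break
		}
	}
	return gvk, nil
}

// countMachinesWithNode (illustrative name) lists Machines as unstructured
// objects using the derived GVK and counts those whose status.nodeRef is set,
// mirroring the Eventually loop in the patch.
func countMachinesWithNode(ctx context.Context, c client.Client, namespace string, gvk schema.GroupVersionKind) (int64, error) {
	machines := &unstructured.UnstructuredList{}
	machines.SetGroupVersionKind(gvk)
	if err := c.List(ctx, machines, client.InNamespace(namespace)); err != nil {
		return 0, err
	}
	var n int64
	for _, m := range machines.Items {
		// NestedMap returns (value, found, error); a populated status.nodeRef
		// means the Machine has been bound to a Node.
		if _, found, err := unstructured.NestedMap(m.Object, "status", "nodeRef"); err == nil && found {
			n++
		}
	}
	return n, nil
}

Deriving the GVK this way keeps the test independent of which API version the
management cluster serves, which is the point of dropping the clusterv1alpha3
hardcoding.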