diff --git a/api/v1alpha3/conversion_test.go b/api/v1alpha3/conversion_test.go index b911d997b551..4c2c07951479 100644 --- a/api/v1alpha3/conversion_test.go +++ b/api/v1alpha3/conversion_test.go @@ -20,54 +20,41 @@ import ( "testing" fuzz "github.com/google/gofuzz" - . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" - "sigs.k8s.io/controller-runtime/pkg/conversion" - - "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "sigs.k8s.io/cluster-api/api/v1alpha4" utilconversion "sigs.k8s.io/cluster-api/util/conversion" + "sigs.k8s.io/controller-runtime/pkg/conversion" ) func TestFuzzyConversion(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) - t.Run("for Cluster", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.Cluster{}, Spoke: &Cluster{}, SpokeAfterMutation: clusterSpokeAfterMutation, })) t.Run("for Machine", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.Machine{}, Spoke: &Machine{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs}, })) t.Run("for MachineSet", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.MachineSet{}, Spoke: &MachineSet{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs, CustomObjectMetaFuzzFunc}, })) t.Run("for MachineDeployment", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.MachineDeployment{}, Spoke: &MachineDeployment{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{BootstrapFuzzFuncs, CustomObjectMetaFuzzFunc}, })) t.Run("for MachineHealthCheckSpec", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.MachineHealthCheck{}, - Spoke: &MachineHealthCheck{}, + Hub: &v1alpha4.MachineHealthCheck{}, + Spoke: &MachineHealthCheck{}, })) } diff --git a/bootstrap/kubeadm/api/v1alpha3/conversion_test.go b/bootstrap/kubeadm/api/v1alpha3/conversion_test.go index 8bb62e1f17ac..e5b0de2b61b9 100644 --- a/bootstrap/kubeadm/api/v1alpha3/conversion_test.go +++ b/bootstrap/kubeadm/api/v1alpha3/conversion_test.go @@ -20,30 +20,20 @@ import ( "testing" fuzz "github.com/google/gofuzz" - . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) - t.Run("for KubeadmConfig", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.KubeadmConfig{}, Spoke: &KubeadmConfig{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for KubeadmConfigTemplate", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.KubeadmConfigTemplate{}, Spoke: &KubeadmConfigTemplate{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go index 28bb556fca81..2e555d49f4ad 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go @@ -27,8 +27,8 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -46,23 +46,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -func setupScheme() *runtime.Scheme { - scheme := runtime.NewScheme() - if err := clusterv1.AddToScheme(scheme); err != nil { - panic(err) - } - if err := expv1.AddToScheme(scheme); err != nil { - panic(err) - } - if err := bootstrapv1.AddToScheme(scheme); err != nil { - panic(err) - } - if err := corev1.AddToScheme(scheme); err != nil { - panic(err) - } - return scheme -} - // MachineToBootstrapMapFunc return kubeadm bootstrap configref name when configref exists. func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { g := NewWithT(t) @@ -83,7 +66,7 @@ func TestKubeadmConfigReconciler_MachineToBootstrapMapFuncReturn(t *testing.T) { } machineObjs = append(machineObjs, m) } - fakeClient := helpers.NewFakeClientWithScheme(setupScheme(), objs...) + fakeClient := helpers.NewFakeClientWithScheme(scheme.Scheme, objs...) reconciler := &KubeadmConfigReconciler{ Client: fakeClient, } @@ -108,7 +91,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfKubeadmConfigIsReady(t * objects := []client.Object{ config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -137,7 +120,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfReferencedMachineIsNotFoun // intentionally omitting machine config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -165,7 +148,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasDataSecretName machine, config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) 
+ myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -200,7 +183,7 @@ func TestKubeadmConfigReconciler_ReturnEarlyIfClusterInfraNotReady(t *testing.T) machine, config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -231,7 +214,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnEarlyIfMachineHasNoCluster(t *t machine, config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -258,7 +241,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfMachineDoesNotHaveAssociat machine, config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -287,7 +270,7 @@ func TestKubeadmConfigReconciler_Reconcile_ReturnNilIfAssociatedClusterIsNotFoun machine, config, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -352,7 +335,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), tc.objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, tc.objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -385,7 +368,7 @@ func TestKubeadmConfigReconciler_Reconcile_GenerateCloudConfigData(t *testing.T) } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -437,7 +420,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC controlPlaneJoinConfig, } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -475,7 +458,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp } objects = append(objects, createSecrets(t, cluster, controlPlaneInitConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, @@ -547,7 +530,7 @@ func TestReconcileIfJoinNodesAndControlPlaneIsReady(t *testing.T) { config, } objects = append(objects, createSecrets(t, cluster, config)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -623,7 +606,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { config, } objects = append(objects, createSecrets(t, cluster, config)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) 
+ myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -677,7 +660,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -750,7 +733,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -896,7 +879,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { } objects = append(objects, createSecrets(t, cluster, initConfig)...) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -1020,7 +1003,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) { // Ensure the discovery portion of the JoinConfiguration gets generated correctly. func TestKubeadmConfigReconciler_Reconcile_DiscoveryReconcileBehaviors(t *testing.T) { k := &KubeadmConfigReconciler{ - Client: helpers.NewFakeClientWithScheme(setupScheme()), + Client: helpers.NewFakeClientWithScheme(scheme.Scheme), KubeadmInitLock: &myInitLocker{}, remoteClientGetter: fakeremote.NewClusterClient, } @@ -1353,7 +1336,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) reconciler := KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -1401,7 +1384,7 @@ func TestKubeadmConfigReconciler_ClusterToKubeadmConfigs(t *testing.T) { expectedNames = append(expectedNames, configName) objs = append(objs, mp, c) } - fakeClient := helpers.NewFakeClientWithScheme(setupScheme(), objs...) + fakeClient := helpers.NewFakeClientWithScheme(scheme.Scheme, objs...) reconciler := &KubeadmConfigReconciler{ Client: fakeClient, } @@ -1440,7 +1423,7 @@ func TestKubeadmConfigReconciler_Reconcile_DoesNotFailIfCASecretsAlreadyExist(t "tls.key": []byte("hello world"), }, } - fakec := helpers.NewFakeClientWithScheme(setupScheme(), []client.Object{cluster, m, c, scrt}...) + fakec := helpers.NewFakeClientWithScheme(scheme.Scheme, []client.Object{cluster, m, c, scrt}...) reconciler := &KubeadmConfigReconciler{ Client: fakec, KubeadmInitLock: &myInitLocker{}, @@ -1472,7 +1455,7 @@ func TestKubeadmConfigReconciler_Reconcile_ExactlyOneControlPlaneMachineInitiali controlPlaneInitMachineSecond, controlPlaneInitConfigSecond, } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) 
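Every hunk above makes the same substitution: the per-file setupScheme() helper is dropped and the fake clients are handed the shared scheme.Scheme from k8s.io/client-go/kubernetes/scheme. That only works if the Cluster API groups are registered into that shared scheme exactly once for the test package. A minimal sketch of such a one-time registration follows; the init function and its placement are illustrative assumptions, not necessarily where this change performs the registration (it may instead rely on the suite/test helpers added elsewhere in this diff).

package controllers // illustrative placement; the actual registration point may differ

import (
	"k8s.io/client-go/kubernetes/scheme"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4"
)

func init() {
	// Register the Cluster API groups into the shared client-go scheme once.
	// Core v1 types (Secrets, ConfigMaps, Nodes) are already part of
	// scheme.Scheme, so only the CAPI groups need to be added here.
	_ = clusterv1.AddToScheme(scheme.Scheme)
	_ = expv1.AddToScheme(scheme.Scheme)
	_ = bootstrapv1.AddToScheme(scheme.Scheme)
}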
k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -1527,7 +1510,7 @@ func TestKubeadmConfigReconciler_Reconcile_PatchWhenErrorOccurred(t *testing.T) objects = append(objects, s) } - myclient := helpers.NewFakeClientWithScheme(setupScheme(), objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, @@ -1663,7 +1646,7 @@ func TestKubeadmConfigReconciler_ResolveFiles(t *testing.T) { t.Run(name, func(t *testing.T) { g := NewWithT(t) - myclient := helpers.NewFakeClientWithScheme(setupScheme(), tc.objects...) + myclient := helpers.NewFakeClientWithScheme(scheme.Scheme, tc.objects...) k := &KubeadmConfigReconciler{ Client: myclient, KubeadmInitLock: &myInitLocker{}, diff --git a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go index b3291e023eae..220041464171 100644 --- a/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go +++ b/bootstrap/kubeadm/internal/locking/control_plane_init_mutex_test.go @@ -130,12 +130,6 @@ func TestControlPlaneInitMutex_Lock(t *testing.T) { } } func TestControlPlaneInitMutex_UnLock(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - uid := types.UID("test-uid") configMap := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ @@ -151,14 +145,14 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) { { name: "should release lock by deleting config map", client: &fakeClient{ - Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + Client: fake.NewClientBuilder().Build(), }, shouldRelease: true, }, { name: "should not release lock if cannot delete config map", client: &fakeClient{ - Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(configMap).Build(), + Client: fake.NewClientBuilder().WithObjects(configMap).Build(), deleteError: errors.New("delete error"), }, shouldRelease: false, @@ -166,7 +160,7 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) { { name: "should release lock if config map does not exist", client: &fakeClient{ - Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + Client: fake.NewClientBuilder().Build(), getError: apierrors.NewNotFound(schema.GroupResource{Group: "", Resource: "configmaps"}, fmt.Sprintf("%s-controlplane", uid)), }, shouldRelease: true, @@ -174,7 +168,7 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) { { name: "should not release lock if error while getting config map", client: &fakeClient{ - Client: fake.NewClientBuilder().WithScheme(scheme).Build(), + Client: fake.NewClientBuilder().Build(), getError: errors.New("get error"), }, shouldRelease: false, @@ -207,17 +201,13 @@ func TestControlPlaneInitMutex_UnLock(t *testing.T) { func TestInfoLines_Lock(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - uid := types.UID("test-uid") info := information{MachineName: "my-control-plane"} b, err := json.Marshal(info) g.Expect(err).NotTo(HaveOccurred()) c := &fakeClient{ - Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + Client: fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: configMapName(clusterName), Namespace: clusterNamespace, diff 
--git a/bootstrap/kubeadm/types/v1beta1/conversion_test.go b/bootstrap/kubeadm/types/v1beta1/conversion_test.go index 7aa84da55a70..f4ec090ec779 100644 --- a/bootstrap/kubeadm/types/v1beta1/conversion_test.go +++ b/bootstrap/kubeadm/types/v1beta1/conversion_test.go @@ -20,48 +20,37 @@ import ( "testing" fuzz "github.com/google/gofuzz" - . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" - "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) - t.Run("for ClusterConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.ClusterConfiguration{}, - Spoke: &ClusterConfiguration{}, + Hub: &v1alpha4.ClusterConfiguration{}, + Spoke: &ClusterConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for ClusterStatus", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.ClusterStatus{}, - Spoke: &ClusterStatus{}, + Hub: &v1alpha4.ClusterStatus{}, + Spoke: &ClusterStatus{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for InitConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.InitConfiguration{}, - Spoke: &InitConfiguration{}, + Hub: &v1alpha4.InitConfiguration{}, + Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.JoinConfiguration{}, - Spoke: &JoinConfiguration{}, + Hub: &v1alpha4.JoinConfiguration{}, + Spoke: &JoinConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, diff --git a/bootstrap/kubeadm/types/v1beta2/conversion_test.go b/bootstrap/kubeadm/types/v1beta2/conversion_test.go index 48165a2f0d4d..acfaea4ea81e 100644 --- a/bootstrap/kubeadm/types/v1beta2/conversion_test.go +++ b/bootstrap/kubeadm/types/v1beta2/conversion_test.go @@ -20,49 +20,38 @@ import ( "testing" fuzz "github.com/google/gofuzz" - . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func TestFuzzyConversion(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) - t.Run("for ClusterConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.ClusterConfiguration{}, - Spoke: &ClusterConfiguration{}, + Hub: &v1alpha4.ClusterConfiguration{}, + Spoke: &ClusterConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for ClusterStatus", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.ClusterStatus{}, - Spoke: &ClusterStatus{}, + Hub: &v1alpha4.ClusterStatus{}, + Spoke: &ClusterStatus{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for InitConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.InitConfiguration{}, - Spoke: &InitConfiguration{}, + Hub: &v1alpha4.InitConfiguration{}, + Spoke: &InitConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, })) t.Run("for JoinConfiguration", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, - Hub: &v1alpha4.JoinConfiguration{}, - Spoke: &JoinConfiguration{}, + Hub: &v1alpha4.JoinConfiguration{}, + Spoke: &JoinConfiguration{}, // NOTE: Kubeadm types does not have ObjectMeta, so we are required to skip data annotation cleanup in the spoke-hub-spoke round trip test. SkipSpokeAnnotationCleanup: true, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go index dabc56dccdbe..a05ee736b459 100644 --- a/bootstrap/util/configowner_test.go +++ b/bootstrap/util/configowner_test.go @@ -22,27 +22,15 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" "sigs.k8s.io/cluster-api/feature" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -var ( - ctx = ctrl.SetupSignalHandler() -) - func TestGetConfigOwner(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - g.Expect(expv1.AddToScheme(scheme)).To(Succeed()) - t.Run("should get the owner when present (Machine)", func(t *testing.T) { g := NewWithT(t) myMachine := &clusterv1.Machine{ @@ -65,7 +53,7 @@ func TestGetConfigOwner(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myMachine).Build() + c := fake.NewClientBuilder().WithObjects(myMachine).Build() obj := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ OwnerReferences: []metav1.OwnerReference{ @@ -115,7 +103,7 @@ func TestGetConfigOwner(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(scheme).WithObjects(myPool).Build() + c := fake.NewClientBuilder().WithObjects(myPool).Build() obj := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ OwnerReferences: []metav1.OwnerReference{ @@ -142,7 +130,7 @@ func TestGetConfigOwner(t *testing.T) { t.Run("return an error when not found", func(t *testing.T) { g := NewWithT(t) - c := fake.NewClientBuilder().WithScheme(scheme).Build() + c := fake.NewClientBuilder().Build() obj := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ OwnerReferences: []metav1.OwnerReference{ @@ -162,7 +150,7 @@ func TestGetConfigOwner(t *testing.T) { t.Run("return nothing when there is no owner", func(t *testing.T) { g := NewWithT(t) - c := fake.NewClientBuilder().WithScheme(scheme).Build() + c := fake.NewClientBuilder().Build() obj := &bootstrapv1.KubeadmConfig{ ObjectMeta: metav1.ObjectMeta{ OwnerReferences: []metav1.OwnerReference{}, diff --git a/bootstrap/util/suite_test.go b/bootstrap/util/suite_test.go new file mode 100644 index 000000000000..da7bea3b5359 --- /dev/null +++ b/bootstrap/util/suite_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "fmt" + "os" + "testing" + + "sigs.k8s.io/cluster-api/test/helpers" + ctrl "sigs.k8s.io/controller-runtime" + // +kubebuilder:scaffold:imports +) + +var ( + testEnv *helpers.TestEnvironment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + // Bootstrapping test environment + testEnv = helpers.NewTestEnvironment() + go func() { + if err := testEnv.StartManager(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + } + }() + <-testEnv.Manager.Elected() + testEnv.WaitForWebhooks() + + // Run tests + code := m.Run() + // Tearing down the test environment + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) + } + + // Report exit code + os.Exit(code) +} diff --git a/cmd/clusterctl/internal/test/fake_repository.go b/cmd/clusterctl/internal/test/fake_repository.go index 3409b4183768..f6b901be3ef6 100644 --- a/cmd/clusterctl/internal/test/fake_repository.go +++ b/cmd/clusterctl/internal/test/fake_repository.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" + "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme" ) type FakeRepository struct { @@ -98,12 +99,7 @@ func (f *FakeRepository) WithVersions(version ...string) *FakeRepository { } func (f *FakeRepository) WithMetadata(version string, metadata *clusterctlv1.Metadata) *FakeRepository { - scheme := runtime.NewScheme() - if err := clusterctlv1.AddToScheme(scheme); err != nil { - panic(err) - } - - codecs := serializer.NewCodecFactory(scheme) + codecs := serializer.NewCodecFactory(scheme.Scheme) mediaType := "application/yaml" info, match := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), mediaType) diff --git a/controllers/cluster_controller_phases_test.go b/controllers/cluster_controller_phases_test.go index b3a6780cbf60..89df60f1c81d 100644 --- a/controllers/cluster_controller_phases_test.go +++ b/controllers/cluster_controller_phases_test.go @@ -23,10 +23,8 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" @@ -125,8 +123,6 @@ func TestClusterReconcilePhases(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - g.Expect(apiextensionsv1.AddToScheme(scheme.Scheme)).To(Succeed()) var c client.Client if tt.infraRef != nil { @@ -209,15 +205,12 @@ func TestClusterReconcilePhases(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) c := fake.NewClientBuilder(). - WithScheme(scheme.Scheme). WithObjects(tt.cluster). Build() if tt.secret != nil { c = fake.NewClientBuilder(). - WithScheme(scheme.Scheme). WithObjects(tt.cluster, tt.secret). Build() } @@ -366,10 +359,7 @@ func TestClusterReconciler_reconcilePhase(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - c := fake.NewClientBuilder(). - WithScheme(scheme.Scheme). 
WithObjects(tt.cluster). Build() diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index 453d10e27a2c..8963fd575672 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -22,7 +22,6 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" @@ -459,10 +458,8 @@ func TestClusterReconcilerNodeRef(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &ClusterReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(cluster, controlPlaneWithNoderef, controlPlaneWithoutNoderef, nonControlPlaneWithNoderef, nonControlPlaneWithoutNoderef).Build(), + Client: fake.NewClientBuilder().WithObjects(cluster, controlPlaneWithNoderef, controlPlaneWithoutNoderef, nonControlPlaneWithNoderef, nonControlPlaneWithoutNoderef).Build(), } requests := r.controlPlaneMachineToCluster(tt.o) g.Expect(requests).To(Equal(tt.want)) diff --git a/controllers/external/util_test.go b/controllers/external/util_test.go index 476a1100c693..09fce058440f 100644 --- a/controllers/external/util_test.go +++ b/controllers/external/util_test.go @@ -26,7 +26,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -64,7 +63,7 @@ func TestGetResourceFound(t *testing.T) { Namespace: testNamespace, } - fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(testResource.DeepCopy()).Build() + fakeClient := fake.NewClientBuilder().WithObjects(testResource.DeepCopy()).Build() got, err := Get(ctx, fakeClient, testResourceReference, testNamespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(got).To(Equal(testResource)) @@ -82,7 +81,7 @@ func TestGetResourceNotFound(t *testing.T) { Namespace: namespace, } - fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build() + fakeClient := fake.NewClientBuilder().Build() _, err := Get(ctx, fakeClient, testResourceReference, namespace) g.Expect(err).To(HaveOccurred()) g.Expect(apierrors.IsNotFound(errors.Cause(err))).To(BeTrue()) @@ -100,7 +99,7 @@ func TestCloneTemplateResourceNotFound(t *testing.T) { Namespace: testNamespace, } - fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).Build() + fakeClient := fake.NewClientBuilder().Build() _, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, TemplateRef: testResourceReference, @@ -171,7 +170,7 @@ func TestCloneTemplateResourceFound(t *testing.T) { g.Expect(ok).To(BeTrue()) g.Expect(expectedSpec).NotTo(BeEmpty()) - fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(template.DeepCopy()).Build() + fakeClient := fake.NewClientBuilder().WithObjects(template.DeepCopy()).Build() ref, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, @@ -263,7 +262,7 @@ func TestCloneTemplateResourceFoundNoOwner(t *testing.T) { g.Expect(ok).To(BeTrue()) g.Expect(expectedSpec).NotTo(BeEmpty()) - fakeClient := 
fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(template.DeepCopy()).Build() + fakeClient := fake.NewClientBuilder().WithObjects(template.DeepCopy()).Build() ref, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, @@ -317,7 +316,7 @@ func TestCloneTemplateMissingSpecTemplate(t *testing.T) { Namespace: testNamespace, } - fakeClient := fake.NewClientBuilder().WithScheme(runtime.NewScheme()).WithObjects(template.DeepCopy()).Build() + fakeClient := fake.NewClientBuilder().WithObjects(template.DeepCopy()).Build() _, err := CloneTemplate(ctx, &CloneTemplateInput{ Client: fakeClient, diff --git a/controllers/machine_controller_noderef_test.go b/controllers/machine_controller_noderef_test.go index 87bb02d32c13..d9b6b0342e6c 100644 --- a/controllers/machine_controller_noderef_test.go +++ b/controllers/machine_controller_noderef_test.go @@ -23,9 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/noderefutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -34,10 +32,8 @@ import ( func TestGetNodeReference(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &MachineReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build(), + Client: fake.NewClientBuilder().Build(), recorder: record.NewFakeRecorder(32), } diff --git a/controllers/machine_controller_phases_test.go b/controllers/machine_controller_phases_test.go index a1e51f4ee918..81c4f8d46c4d 100644 --- a/controllers/machine_controller_phases_test.go +++ b/controllers/machine_controller_phases_test.go @@ -847,8 +847,6 @@ func TestReconcileBootstrap(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - if tc.machine == nil { tc.machine = defaultMachine.DeepCopy() } @@ -856,7 +854,6 @@ func TestReconcileBootstrap(t *testing.T) { bootstrapConfig := &unstructured.Unstructured{Object: tc.bootstrapConfig} r := &MachineReconciler{ Client: fake.NewClientBuilder(). - WithScheme(scheme.Scheme). WithObjects(tc.machine, external.TestGenericBootstrapCRD.DeepCopy(), external.TestGenericInfrastructureCRD.DeepCopy(), @@ -1060,8 +1057,6 @@ func TestReconcileInfrastructure(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - if tc.machine == nil { tc.machine = defaultMachine.DeepCopy() } @@ -1069,7 +1064,6 @@ func TestReconcileInfrastructure(t *testing.T) { infraConfig := &unstructured.Unstructured{Object: tc.infraConfig} r := &MachineReconciler{ Client: fake.NewClientBuilder(). - WithScheme(scheme.Scheme). 
WithObjects(tc.machine, external.TestGenericBootstrapCRD.DeepCopy(), external.TestGenericInfrastructureCRD.DeepCopy(), diff --git a/controllers/machinedeployment_controller_test.go b/controllers/machinedeployment_controller_test.go index d2e4b1e941b1..d6c60cc9fd1e 100644 --- a/controllers/machinedeployment_controller_test.go +++ b/controllers/machinedeployment_controller_test.go @@ -24,7 +24,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" @@ -480,7 +479,6 @@ func TestMachineSetToDeployments(t *testing.T) { }, } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) r := &MachineDeploymentReconciler{ Client: fake.NewClientBuilder().WithObjects(machineDeplopymentList...).Build(), recorder: record.NewFakeRecorder(32), @@ -546,7 +544,6 @@ func TestGetMachineDeploymentsForMachineSet(t *testing.T) { }, } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) r := &MachineDeploymentReconciler{ Client: fake.NewClientBuilder().WithObjects(append(machineDeploymentList, &ms1, &ms2)...).Build(), recorder: record.NewFakeRecorder(32), @@ -705,8 +702,6 @@ func TestGetMachineSetsForDeployment(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &MachineDeploymentReconciler{ Client: fake.NewClientBuilder().WithObjects(machineSetList...).Build(), recorder: record.NewFakeRecorder(32), diff --git a/controllers/machinedeployment_rolling_test.go b/controllers/machinedeployment_rolling_test.go index 0f0af8fe9098..6aa0332cc2db 100644 --- a/controllers/machinedeployment_rolling_test.go +++ b/controllers/machinedeployment_rolling_test.go @@ -23,7 +23,6 @@ import ( . "github.com/onsi/gomega" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -179,8 +178,6 @@ func TestReconcileNewMachineSet(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - resources := []client.Object{ tc.machineDeployment, } @@ -373,8 +370,6 @@ func TestReconcileOldMachineSets(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - resources := []client.Object{ tc.machineDeployment, } diff --git a/controllers/machinedeployment_sync_test.go b/controllers/machinedeployment_sync_test.go index 0eab7b498426..f2890b34f089 100644 --- a/controllers/machinedeployment_sync_test.go +++ b/controllers/machinedeployment_sync_test.go @@ -24,7 +24,6 @@ import ( . 
"github.com/onsi/gomega" "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -363,8 +362,6 @@ func TestScaleMachineSet(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - resources := []client.Object{ tc.machineDeployment, tc.machineSet, diff --git a/controllers/machinehealthcheck_controller_test.go b/controllers/machinehealthcheck_controller_test.go index 2c3e0ab1a490..2cca16fc1a28 100644 --- a/controllers/machinehealthcheck_controller_test.go +++ b/controllers/machinehealthcheck_controller_test.go @@ -1796,7 +1796,6 @@ func TestMachineHealthCheck_Reconcile(t *testing.T) { } func TestClusterToMachineHealthCheck(t *testing.T) { - _ = clusterv1.AddToScheme(scheme.Scheme) fakeClient := fake.NewClientBuilder().Build() r := &MachineHealthCheckReconciler{ @@ -1876,7 +1875,6 @@ func TestClusterToMachineHealthCheck(t *testing.T) { } func TestMachineToMachineHealthCheck(t *testing.T) { - _ = clusterv1.AddToScheme(scheme.Scheme) fakeClient := fake.NewClientBuilder().Build() r := &MachineHealthCheckReconciler{ @@ -1952,7 +1950,6 @@ func TestMachineToMachineHealthCheck(t *testing.T) { } func TestNodeToMachineHealthCheck(t *testing.T) { - _ = clusterv1.AddToScheme(scheme.Scheme) fakeClient := fake.NewClientBuilder().Build() r := &MachineHealthCheckReconciler{ @@ -2567,7 +2564,6 @@ func newMachineHealthCheck(namespace, clusterName string) *clusterv1.MachineHeal } func TestPatchTargets(t *testing.T) { - _ = clusterv1.AddToScheme(scheme.Scheme) g := NewWithT(t) namespace := defaultNamespaceName @@ -2587,7 +2583,7 @@ func TestPatchTargets(t *testing.T) { machine2 := machine1.DeepCopy() machine2.Name = "machine2" - cl := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects( + cl := fake.NewClientBuilder().WithObjects( machine1, machine2, mhc, @@ -2601,7 +2597,7 @@ func TestPatchTargets(t *testing.T) { // To make the patch fail, create patchHelper with a different client. fakeMachine := machine1.DeepCopy() fakeMachine.Name = "fake" - patchHelper, _ := patch.NewHelper(fakeMachine, fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(fakeMachine).Build()) + patchHelper, _ := patch.NewHelper(fakeMachine, fake.NewClientBuilder().WithObjects(fakeMachine).Build()) // healthCheckTarget with fake patchHelper, patch should fail on this target. target1 := healthCheckTarget{ MHC: mhc, diff --git a/controllers/machinehealthcheck_targets_test.go b/controllers/machinehealthcheck_targets_test.go index 46c44ba4a48c..331b897a15be 100644 --- a/controllers/machinehealthcheck_targets_test.go +++ b/controllers/machinehealthcheck_targets_test.go @@ -23,7 +23,6 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util/conditions" @@ -157,7 +156,6 @@ func TestGetTargetsFromMHC(t *testing.T) { t.Run(tc.desc, func(t *testing.T) { gs := NewGomegaWithT(t) - gs.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) k8sClient := fake.NewClientBuilder().WithObjects(tc.toCreate...).Build() // Create a test reconciler diff --git a/controllers/machineset_controller_test.go b/controllers/machineset_controller_test.go index 18c0f87fdecc..cf7db63e531f 100644 --- a/controllers/machineset_controller_test.go +++ b/controllers/machineset_controller_test.go @@ -26,7 +26,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controllers/external" @@ -340,10 +339,8 @@ func TestMachineSetOwnerReference(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - msr := &MachineSetReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects( + Client: fake.NewClientBuilder().WithObjects( testCluster, ms1, ms2, @@ -392,10 +389,8 @@ func TestMachineSetReconcile(t *testing.T) { NamespacedName: util.ObjectKey(ms), } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - msr := &MachineSetReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(testCluster, ms).Build(), + Client: fake.NewClientBuilder().WithObjects(testCluster, ms).Build(), recorder: record.NewFakeRecorder(32), } result, err := msr.Reconcile(ctx, request) @@ -415,11 +410,9 @@ func TestMachineSetReconcile(t *testing.T) { NamespacedName: util.ObjectKey(ms), } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - rec := record.NewFakeRecorder(32) msr := &MachineSetReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(testCluster, ms).Build(), + Client: fake.NewClientBuilder().WithObjects(testCluster, ms).Build(), recorder: rec, } _, _ = msr.Reconcile(ctx, request) @@ -438,11 +431,9 @@ func TestMachineSetReconcile(t *testing.T) { NamespacedName: util.ObjectKey(ms), } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - rec := record.NewFakeRecorder(32) msr := &MachineSetReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(testCluster, ms).Build(), + Client: fake.NewClientBuilder().WithObjects(testCluster, ms).Build(), recorder: rec, } _, err := msr.Reconcile(ctx, request) @@ -451,8 +442,6 @@ func TestMachineSetReconcile(t *testing.T) { } func TestMachineSetToMachines(t *testing.T) { - g := NewWithT(t) - machineSetList := []client.Object{ &clusterv1.MachineSet{ ObjectMeta: metav1.ObjectMeta{ @@ -529,8 +518,6 @@ func TestMachineSetToMachines(t *testing.T) { }, } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &MachineSetReconciler{ Client: fake.NewClientBuilder().WithObjects(append(machineSetList, &m, &m2, &m3)...).Build(), } @@ -674,10 +661,8 @@ func TestAdoptOrphan(t *testing.T) { }, } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &MachineSetReconciler{ - Client: 
fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(&m).Build(), + Client: fake.NewClientBuilder().WithObjects(&m).Build(), } for _, tc := range testCases { g.Expect(r.adoptOrphan(ctx, tc.machineSet.DeepCopy(), tc.machine.DeepCopy())).To(Succeed()) diff --git a/controllers/remote/cluster_test.go b/controllers/remote/cluster_test.go index 8f64262e7ace..90756147490d 100644 --- a/controllers/remote/cluster_test.go +++ b/controllers/remote/cluster_test.go @@ -25,8 +25,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -88,14 +86,10 @@ users: ) func TestNewClusterClient(t *testing.T) { - g := NewWithT(t) - - testScheme := runtime.NewScheme() - g.Expect(scheme.AddToScheme(testScheme)).To(Succeed()) t.Run("cluster with valid kubeconfig", func(t *testing.T) { gs := NewWithT(t) - client := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(validSecret).Build() + client := fake.NewClientBuilder().WithObjects(validSecret).Build() _, err := NewClusterClient(ctx, "test-source", client, clusterWithValidKubeConfig) // Since we do not have a remote server to connect to, we should expect to get // an error to that effect for the purpose of this test. @@ -111,7 +105,7 @@ func TestNewClusterClient(t *testing.T) { t.Run("cluster with no kubeconfig", func(t *testing.T) { gs := NewWithT(t) - client := fake.NewClientBuilder().WithScheme(testScheme).Build() + client := fake.NewClientBuilder().Build() _, err := NewClusterClient(ctx, "test-source", client, clusterWithNoKubeConfig) gs.Expect(err).To(MatchError(ContainSubstring("not found"))) }) @@ -119,7 +113,7 @@ func TestNewClusterClient(t *testing.T) { t.Run("cluster with invalid kubeconfig", func(t *testing.T) { gs := NewWithT(t) - client := fake.NewClientBuilder().WithScheme(testScheme).WithObjects(invalidSecret).Build() + client := fake.NewClientBuilder().WithObjects(invalidSecret).Build() _, err := NewClusterClient(ctx, "test-source", client, clusterWithInvalidKubeConfig) gs.Expect(err).To(HaveOccurred()) gs.Expect(apierrors.IsNotFound(err)).To(BeFalse()) diff --git a/controlplane/kubeadm/api/v1alpha3/conversion_test.go b/controlplane/kubeadm/api/v1alpha3/conversion_test.go index b43afb0dbc32..59fb9ebbba61 100644 --- a/controlplane/kubeadm/api/v1alpha3/conversion_test.go +++ b/controlplane/kubeadm/api/v1alpha3/conversion_test.go @@ -20,10 +20,8 @@ import ( "testing" fuzz "github.com/google/gofuzz" - . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" - "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" cabpkv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" kubeadmv1beta1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/types/v1beta1" @@ -32,13 +30,7 @@ import ( ) func TestFuzzyConversion(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - g.Expect(v1alpha4.AddToScheme(scheme)).To(Succeed()) - t.Run("for KubeadmControlPLane", utilconversion.FuzzTestFunc(utilconversion.FuzzTestFuncInput{ - Scheme: scheme, Hub: &v1alpha4.KubeadmControlPlane{}, Spoke: &KubeadmControlPlane{}, FuzzerFuncs: []fuzzer.FuzzerFuncs{fuzzFuncs}, diff --git a/controlplane/kubeadm/controllers/controller_test.go b/controlplane/kubeadm/controllers/controller_test.go index 1f5f4bf6ad6e..48f8e5157754 100644 --- a/controlplane/kubeadm/controllers/controller_test.go +++ b/controlplane/kubeadm/controllers/controller_test.go @@ -58,7 +58,7 @@ import ( func TestClusterToKubeadmControlPlane(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"}) cluster.Spec = clusterv1.ClusterSpec{ @@ -89,7 +89,7 @@ func TestClusterToKubeadmControlPlane(t *testing.T) { func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"}) @@ -104,7 +104,7 @@ func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) { func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: "test"}) cluster.Spec = clusterv1.ClusterSpec{ @@ -227,7 +227,7 @@ func TestReconcileNoClusterOwnerRef(t *testing.T) { kcp.Default() g.Expect(kcp.ValidateCreate()).To(Succeed()) - fakeClient := newFakeClient(g, kcp.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -263,7 +263,7 @@ func TestReconcileNoKCP(t *testing.T) { }, } - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -303,7 +303,7 @@ func TestReconcileNoCluster(t *testing.T) { kcp.Default() g.Expect(kcp.ValidateCreate()).To(Succeed()) - fakeClient := newFakeClient(g, kcp.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -351,7 +351,7 @@ func TestReconcilePaused(t *testing.T) { } kcp.Default() g.Expect(kcp.ValidateCreate()).To(Succeed()) - fakeClient := newFakeClient(g, kcp.DeepCopy(), cluster.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -405,7 +405,7 @@ func TestReconcileClusterNoEndpoints(t *testing.T) { kcp.Default() g.Expect(kcp.ValidateCreate()).To(Succeed()) - fakeClient := newFakeClient(g, kcp.DeepCopy(), cluster.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -485,7 
+485,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Machines.Insert(m) } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -576,7 +576,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { fmc.Machines.Insert(m) } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -655,7 +655,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { objs = append(objs, m, cfg) fmc.Machines.Insert(m) } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -706,7 +706,7 @@ func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) { Workload: fakeWorkloadCluster{}, } - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy(), fmc.Machines["test0"].DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy(), fmc.Machines["test0"].DeepCopy()) fmc.Reader = fakeClient recorder := record.NewFakeRecorder(32) r := &KubeadmControlPlaneReconciler{ @@ -833,7 +833,6 @@ kubernetesVersion: metav1.16.1`, } fakeClient := newFakeClient( - g, kcp.DeepCopy(), cluster.DeepCopy(), genericMachineTemplate.DeepCopy(), @@ -1008,7 +1007,7 @@ kubernetesVersion: metav1.16.1`, corednsCM.DeepCopy(), kubeadmCM.DeepCopy(), } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ @@ -1066,7 +1065,7 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ @@ -1090,7 +1089,7 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ Client: fakeClient, @@ -1112,7 +1111,7 @@ kubernetesVersion: metav1.16.1`, kubeadmCM.DeepCopy(), } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ @@ -1142,7 +1141,7 @@ kubernetesVersion: metav1.16.1`, depl.Spec.Template.Spec.Containers[0].Image = "my-cool-image!!!!" // something very unlikely for getCoreDNSInfo to parse objs = append(objs, depl) - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) workloadCluster := fakeWorkloadCluster{ Workload: &internal.Workload{ Client: fakeClient, @@ -1173,7 +1172,7 @@ kubernetesVersion: metav1.16.1`, corednsCM.DeepCopy(), } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) workloadCluster := fakeWorkloadCluster{ @@ -1202,7 +1201,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { initObjs = append(initObjs, m) } - fakeClient := newFakeClient(g, initObjs...) + fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -1252,7 +1251,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { initObjs = append(initObjs, m) } - fakeClient := newFakeClient(g, initObjs...) + fakeClient := newFakeClient(initObjs...) 
r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -1301,7 +1300,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { initObjs = append(initObjs, m) } - fakeClient := newFakeClient(g, initObjs...) + fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -1332,7 +1331,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { cluster, kcp, _ := createClusterWithControlPlane() controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer) - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -1352,10 +1351,7 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) { // test utils -func newFakeClient(g *WithT, initObjs ...client.Object) client.Client { - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - g.Expect(bootstrapv1.AddToScheme(scheme.Scheme)).To(Succeed()) - g.Expect(controlplanev1.AddToScheme(scheme.Scheme)).To(Succeed()) +func newFakeClient(initObjs ...client.Object) client.Client { return &fakeClient{ startTime: time.Now(), Client: helpers.NewFakeClientWithScheme(scheme.Scheme, initObjs...), diff --git a/controlplane/kubeadm/controllers/helpers_test.go b/controlplane/kubeadm/controllers/helpers_test.go index 602359c17de5..3db90837b0fb 100644 --- a/controlplane/kubeadm/controllers/helpers_test.go +++ b/controlplane/kubeadm/controllers/helpers_test.go @@ -70,7 +70,7 @@ func TestReconcileKubeconfigEmptyAPIEndpoints(t *testing.T) { } clusterName := client.ObjectKey{Namespace: "test", Name: "foo"} - fakeClient := newFakeClient(g, kcp.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -119,7 +119,7 @@ func TestReconcileKubeconfigMissingCACertificate(t *testing.T) { }, } - fakeClient := newFakeClient(g, kcp.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -179,7 +179,7 @@ func TestReconcileKubeconfigSecretAdoptsV1alpha2Secrets(t *testing.T) { }, // the Cluster ownership defines v1alpha2 controlled secrets ) - fakeClient := newFakeClient(g, kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -243,7 +243,7 @@ func TestReconcileKubeconfigSecretDoesNotAdoptsUserSecrets(t *testing.T) { metav1.OwnerReference{}, // user defined secrets are not owned by the cluster. 
) - fakeClient := newFakeClient(g, kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), existingKubeconfigSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -303,7 +303,7 @@ func TestKubeadmControlPlaneReconciler_reconcileKubeconfig(t *testing.T) { *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")), ) - fakeClient := newFakeClient(g, kcp.DeepCopy(), existingCACertSecret.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), existingCACertSecret.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, recorder: record.NewFakeRecorder(32), @@ -369,7 +369,7 @@ func TestCloneConfigsAndGenerateMachine(t *testing.T) { }, } - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -453,7 +453,7 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { }, } - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -478,7 +478,7 @@ func TestCloneConfigsAndGenerateMachineFail(t *testing.T) { func TestKubeadmControlPlaneReconciler_generateMachine(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -537,7 +537,7 @@ func TestKubeadmControlPlaneReconciler_generateMachine(t *testing.T) { func TestKubeadmControlPlaneReconciler_generateKubeadmConfig(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g) + fakeClient := newFakeClient() cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ diff --git a/controlplane/kubeadm/controllers/scale_test.go b/controlplane/kubeadm/controllers/scale_test.go index 810b7bfc832f..e8e0bb9de29e 100644 --- a/controlplane/kubeadm/controllers/scale_test.go +++ b/controlplane/kubeadm/controllers/scale_test.go @@ -28,7 +28,6 @@ import ( "sigs.k8s.io/cluster-api/util/conditions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" @@ -43,7 +42,7 @@ func TestKubeadmControlPlaneReconciler_initializeControlPlane(t *testing.T) { cluster, kcp, genericMachineTemplate := createClusterWithControlPlane() - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -104,7 +103,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { initObjs = append(initObjs, m.DeepCopy()) } - fakeClient := newFakeClient(g, initObjs...) + fakeClient := newFakeClient(initObjs...) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -141,7 +140,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) { g := NewWithT(t) - fakeClient := newFakeClient(g, initObjs...) + fakeClient := newFakeClient(initObjs...) 
fmc := &fakeManagementCluster{ Machines: beforeMachines.DeepCopy(), Workload: fakeWorkloadCluster{}, @@ -181,7 +180,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. "one": machine("one"), } setMachineHealthy(machines["one"]) - fakeClient := newFakeClient(g, machines["one"]) + fakeClient := newFakeClient(machines["one"]) r := &KubeadmControlPlaneReconciler{ recorder: record.NewFakeRecorder(32), @@ -222,7 +221,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. } setMachineHealthy(machines["two"]) setMachineHealthy(machines["three"]) - fakeClient := newFakeClient(g, machines["one"], machines["two"], machines["three"]) + fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ recorder: record.NewFakeRecorder(32), @@ -262,7 +261,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane_NoError(t *testing. "three": machine("three", withTimestamp(time.Now())), } setMachineHealthy(machines["three"]) - fakeClient := newFakeClient(g, machines["one"], machines["two"], machines["three"]) + fakeClient := newFakeClient(machines["one"], machines["two"], machines["three"]) r := &KubeadmControlPlaneReconciler{ recorder: record.NewFakeRecorder(32), @@ -391,8 +390,6 @@ func TestSelectMachineForScaleDown(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - selectedMachine, err := selectMachineForScaleDown(tc.cp, tc.outDatedMachines) if tc.expectErr { diff --git a/controlplane/kubeadm/controllers/status_test.go b/controlplane/kubeadm/controllers/status_test.go index 446586119b4f..96737ad011a5 100644 --- a/controlplane/kubeadm/controllers/status_test.go +++ b/controlplane/kubeadm/controllers/status_test.go @@ -68,7 +68,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusNoMachines(t *testing.T) { kcp.Default() g.Expect(kcp.ValidateCreate()).To(Succeed()) - fakeClient := newFakeClient(g, kcp.DeepCopy(), cluster.DeepCopy()) + fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy()) log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ @@ -133,7 +133,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesNotReady(t *testin machines[m.Name] = m } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ @@ -198,7 +198,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusAllMachinesReady(t *testing.T machines[m.Name] = m } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ @@ -272,7 +272,7 @@ func TestKubeadmControlPlaneReconciler_updateStatusMachinesReadyMixed(t *testing m, n := createMachineNodePair("testReady", cluster, kcp, true) objs = append(objs, n, m, kubeadmConfigMap()) machines[m.Name] = m - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) log.SetLogger(klogr.New()) r := &KubeadmControlPlaneReconciler{ @@ -344,7 +344,7 @@ func TestKubeadmControlPlaneReconciler_machinesCreatedIsIsTrueEvenWhenTheNodesAr objs = append(objs, n, m) } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) 
log.SetLogger(klogr.New()) // Set all the machines to `not ready` diff --git a/controlplane/kubeadm/controllers/upgrade_test.go b/controlplane/kubeadm/controllers/upgrade_test.go index 36bdb49f785e..3210261ff39e 100644 --- a/controlplane/kubeadm/controllers/upgrade_test.go +++ b/controlplane/kubeadm/controllers/upgrade_test.go @@ -49,7 +49,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleUp(t *testing.T) { kcp.Spec.Replicas = pointer.Int32Ptr(1) setKCPHealthy(kcp) - fakeClient := newFakeClient(g, cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) + fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy(), genericMachineTemplate.DeepCopy()) r := &KubeadmControlPlaneReconciler{ Client: fakeClient, @@ -173,7 +173,7 @@ func TestKubeadmControlPlaneReconciler_RolloutStrategy_ScaleDown(t *testing.T) { objs = append(objs, m, cfg) fmc.Machines.Insert(m) } - fakeClient := newFakeClient(g, objs...) + fakeClient := newFakeClient(objs...) fmc.Reader = fakeClient r := &KubeadmControlPlaneReconciler{ Client: fakeClient, diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go index d1ae76c7dde3..9008e53367c6 100644 --- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go @@ -26,7 +26,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" "sigs.k8s.io/controller-runtime/pkg/client" @@ -868,9 +867,6 @@ func TestGetCoreDNSInfo(t *testing.T) { } func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -902,7 +898,7 @@ func TestUpdateCoreDNSImageInfoInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, diff --git a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go index e7fdc91a66dd..918e018715fc 100644 --- a/controlplane/kubeadm/internal/workload_cluster_etcd_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_etcd_test.go @@ -28,7 +28,6 @@ import ( pb "go.etcd.io/etcd/etcdserver/etcdserverpb" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd" fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake" @@ -37,9 +36,6 @@ import ( ) func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -85,7 +81,7 @@ func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -136,9 +132,6 @@ func TestRemoveEtcdMemberForMachine(t *testing.T) { cp2.Name = "cp2" cp2.Namespace = "cp2" - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string machine *clusterv1.Machine @@ -604,9 +597,6 @@ func TestReconcileEtcdMembers(t *testing.T) { } func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string apiEndpoint string @@ -652,7 +642,7 @@ func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, diff --git a/controlplane/kubeadm/internal/workload_cluster_test.go b/controlplane/kubeadm/internal/workload_cluster_test.go index 9b67595bfa7f..680aefd06479 100644 --- a/controlplane/kubeadm/internal/workload_cluster_test.go +++ b/controlplane/kubeadm/internal/workload_cluster_test.go @@ -29,7 +29,6 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4" "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha4" @@ -38,11 +37,6 @@ import ( ) func TestUpdateKubeProxyImageInfo(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(appsv1.AddToScheme(scheme)).To(Succeed()) - tests := []struct { name string ds appsv1.DaemonSet @@ -147,7 +141,7 @@ func TestUpdateKubeProxyImageInfo(t *testing.T) { objects := []client.Object{ &tt.ds, } - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(objects...).Build() w := &Workload{ Client: fakeClient, } @@ -198,9 +192,6 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { kubeadmConfigWithoutClusterStatus := kubeadmConfig.DeepCopy() delete(kubeadmConfigWithoutClusterStatus.Data, clusterStatusKey) - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string kubernetesVersion semver.Version @@ -266,7 +257,7 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } @@ -290,9 +281,6 @@ func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) { } func TestUpdateKubeletConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string version semver.Version @@ -364,7 +352,7 @@ func TestUpdateKubeletConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := 
NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } @@ -387,9 +375,6 @@ func TestUpdateKubeletConfigMap(t *testing.T) { } func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string version semver.Version @@ -532,7 +517,7 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, @@ -556,9 +541,6 @@ func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) { } func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string version semver.Version @@ -709,7 +691,7 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, @@ -733,9 +715,6 @@ func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) { } func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string version semver.Version @@ -753,7 +732,7 @@ func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -781,9 +760,6 @@ func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) { } func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -810,7 +786,7 @@ func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -838,9 +814,6 @@ func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) { } func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -887,7 +860,7 @@ func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := 
fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -915,9 +888,6 @@ func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) { } func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -962,7 +932,7 @@ func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -990,9 +960,6 @@ func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) { } func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { - g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) tests := []struct { name string clusterConfigurationData string @@ -1036,7 +1003,7 @@ func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&corev1.ConfigMap{ + fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem, @@ -1121,9 +1088,7 @@ func TestClusterStatus(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.objs...).Build() + fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build() w := &Workload{ Client: fakeClient, } diff --git a/exp/addons/api/v1alpha3/clusterresourcesetbinding_types_test.go b/exp/addons/api/v1alpha3/clusterresourcesetbinding_types_test.go index 5d7cd552f335..1391548f5534 100644 --- a/exp/addons/api/v1alpha3/clusterresourcesetbinding_types_test.go +++ b/exp/addons/api/v1alpha3/clusterresourcesetbinding_types_test.go @@ -24,15 +24,9 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) func TestIsResourceApplied(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - resourceRefApplyFailed := ResourceRef{ Name: "applyFailed", Kind: "Secret", @@ -98,11 +92,6 @@ func TestIsResourceApplied(t *testing.T) { } func TestSetResourceBinding(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - resourceRefApplyFailed := ResourceRef{ Name: "applyFailed", Kind: "Secret", diff --git a/exp/addons/api/v1alpha4/clusterresourcesetbinding_types_test.go b/exp/addons/api/v1alpha4/clusterresourcesetbinding_types_test.go index 7ffe81159763..ceefafaf6a3a 100644 --- a/exp/addons/api/v1alpha4/clusterresourcesetbinding_types_test.go +++ b/exp/addons/api/v1alpha4/clusterresourcesetbinding_types_test.go @@ -24,15 +24,9 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" ) func TestIsResourceApplied(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - resourceRefApplyFailed := ResourceRef{ Name: "applyFailed", Kind: "Secret", @@ -98,11 +92,6 @@ func TestIsResourceApplied(t *testing.T) { } func TestSetResourceBinding(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(AddToScheme(scheme)).To(Succeed()) - resourceRefApplyFailed := ResourceRef{ Name: "applyFailed", Kind: "Secret", diff --git a/exp/addons/controllers/clusterresourceset_helpers_test.go b/exp/addons/controllers/clusterresourceset_helpers_test.go index 51cc28a07cef..8d14965fbb75 100644 --- a/exp/addons/controllers/clusterresourceset_helpers_test.go +++ b/exp/addons/controllers/clusterresourceset_helpers_test.go @@ -33,11 +33,6 @@ import ( ) func TestGetorCreateClusterResourceSetBinding(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(addonsv1.AddToScheme(scheme)).To(Succeed()) - testClusterWithBinding := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test-cluster-with-binding", @@ -78,7 +73,6 @@ func TestGetorCreateClusterResourceSetBinding(t *testing.T) { } c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(testClusterResourceSetBinding). Build() r := &ClusterResourceSetReconciler{ @@ -115,11 +109,6 @@ func TestGetorCreateClusterResourceSetBinding(t *testing.T) { } func TestGetSecretFromNamespacedName(t *testing.T) { - g := NewWithT(t) - - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - existingSecretName := types.NamespacedName{Name: "my-secret", Namespace: "default"} existingSecret := &corev1.Secret{ TypeMeta: metav1.TypeMeta{Kind: "Secret", APIVersion: "v1"}, @@ -154,7 +143,6 @@ func TestGetSecretFromNamespacedName(t *testing.T) { gs := NewWithT(t) c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(existingSecret). 
Build() diff --git a/exp/controllers/machinepool_controller_noderef_test.go b/exp/controllers/machinepool_controller_noderef_test.go index 354043701ad5..22a3d1187c03 100644 --- a/exp/controllers/machinepool_controller_noderef_test.go +++ b/exp/controllers/machinepool_controller_noderef_test.go @@ -23,21 +23,14 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ) func TestMachinePoolGetNodeReference(t *testing.T) { - g := NewWithT(t) - - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).Build(), + Client: fake.NewClientBuilder().Build(), recorder: record.NewFakeRecorder(32), } @@ -140,25 +133,25 @@ func TestMachinePoolGetNodeReference(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { - gt := NewWithT(t) + g := NewWithT(t) result, err := r.getNodeReferences(ctx, client, test.providerIDList) if test.err == nil { g.Expect(err).To(BeNil()) } else { - gt.Expect(err).NotTo(BeNil()) - gt.Expect(err).To(Equal(test.err), "Expected error %v, got %v", test.err, err) + g.Expect(err).NotTo(BeNil()) + g.Expect(err).To(Equal(test.err), "Expected error %v, got %v", test.err, err) } if test.expected == nil && len(result.references) == 0 { return } - gt.Expect(len(result.references)).To(Equal(len(test.expected.references)), "Expected NodeRef count to be %v, got %v", len(result.references), len(test.expected.references)) + g.Expect(len(result.references)).To(Equal(len(test.expected.references)), "Expected NodeRef count to be %v, got %v", len(result.references), len(test.expected.references)) for n := range test.expected.references { - gt.Expect(result.references[n].Name).To(Equal(test.expected.references[n].Name), "Expected NodeRef's name to be %v, got %v", result.references[n].Name, test.expected.references[n].Name) - gt.Expect(result.references[n].Namespace).To(Equal(test.expected.references[n].Namespace), "Expected NodeRef's namespace to be %v, got %v", result.references[n].Namespace, test.expected.references[n].Namespace) + g.Expect(result.references[n].Name).To(Equal(test.expected.references[n].Name), "Expected NodeRef's name to be %v, got %v", result.references[n].Name, test.expected.references[n].Name) + g.Expect(result.references[n].Namespace).To(Equal(test.expected.references[n].Namespace), "Expected NodeRef's namespace to be %v, got %v", result.references[n].Namespace, test.expected.references[n].Namespace) } }) } diff --git a/exp/controllers/machinepool_controller_phases_test.go b/exp/controllers/machinepool_controller_phases_test.go index 2f499a363f1f..f5bf4068a6a7 100644 --- a/exp/controllers/machinepool_controller_phases_test.go +++ b/exp/controllers/machinepool_controller_phases_test.go @@ -26,7 +26,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha4" @@ -116,7 +115,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { infraConfig := defaultInfra.DeepCopy() r := &MachinePoolReconciler{ - Client: 
fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -145,7 +144,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { infraConfig := defaultInfra.DeepCopy() r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -172,7 +171,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { g.Expect(err).NotTo(HaveOccurred()) r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -215,7 +214,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -270,7 +269,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -303,7 +302,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -343,7 +342,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.Status.NodeRefs = []corev1.ObjectReference{{Kind: "Node", Name: "machinepool-test-node"}} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, 
infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -396,7 +395,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { } r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -454,7 +453,7 @@ func TestReconcileMachinePoolPhases(t *testing.T) { machinepool.SetDeletionTimestamp(&deletionTimestamp) r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(defaultCluster, defaultKubeconfigSecret, machinepool, bootstrapConfig, infraConfig).Build(), } res, err := r.reconcile(ctx, defaultCluster, machinepool) @@ -688,16 +687,13 @@ func TestReconcileMachinePoolBootstrap(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - if tc.machinepool == nil { tc.machinepool = defaultMachinePool.DeepCopy() } bootstrapConfig := &unstructured.Unstructured{Object: tc.bootstrapConfig} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(tc.machinepool, bootstrapConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(tc.machinepool, bootstrapConfig).Build(), } res, err := r.reconcileBootstrap(ctx, defaultCluster, tc.machinepool) @@ -898,15 +894,13 @@ func TestReconcileMachinePoolInfrastructure(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - if tc.machinepool == nil { tc.machinepool = defaultMachinePool.DeepCopy() } infraConfig := &unstructured.Unstructured{Object: tc.infraConfig} r := &MachinePoolReconciler{ - Client: fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(tc.machinepool, infraConfig).Build(), + Client: fake.NewClientBuilder().WithObjects(tc.machinepool, infraConfig).Build(), } res, err := r.reconcileInfrastructure(ctx, defaultCluster, tc.machinepool) diff --git a/exp/controllers/machinepool_controller_test.go b/exp/controllers/machinepool_controller_test.go index eb919d8bbc1d..3611daf93d89 100644 --- a/exp/controllers/machinepool_controller_test.go +++ b/exp/controllers/machinepool_controller_test.go @@ -110,8 +110,6 @@ func TestMachinePoolFinalizer(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - mr := &MachinePoolReconciler{ Client: helpers.NewFakeClientWithScheme( scheme.Scheme, @@ -220,8 +218,6 @@ func TestMachinePoolOwnerReference(t *testing.T) { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - mr := &MachinePoolReconciler{ Client: helpers.NewFakeClientWithScheme( scheme.Scheme, @@ -415,9 +411,6 @@ func TestReconcileMachinePoolRequest(t *testing.T) { for _, tc := range testCases { t.Run("machinePool should be "+tc.machinePool.Name, func(t *testing.T) { g := NewWithT(t) - - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - clientFake := helpers.NewFakeClientWithScheme( scheme.Scheme, &testCluster, @@ -536,9 +529,6 @@ func 
TestReconcileMachinePoolDeleteExternal(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { g := NewWithT(t) - - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - objs := []client.Object{testCluster, machinePool} if tc.bootstrapExists { @@ -567,8 +557,6 @@ func TestReconcileMachinePoolDeleteExternal(t *testing.T) { func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) { g := NewWithT(t) - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - dt := metav1.Now() testCluster := &clusterv1.Cluster{ @@ -847,8 +835,6 @@ func TestMachinePoolConditions(t *testing.T) { tt.beforeFunc(bootstrap, infra, mp, nodes) } - g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed()) - clientFake := helpers.NewFakeClientWithScheme( scheme.Scheme, testCluster, diff --git a/test/infrastructure/docker/controllers/dockermachine_controller_test.go b/test/infrastructure/docker/controllers/dockermachine_controller_test.go index cd7b5030fde5..512b3fd059c3 100644 --- a/test/infrastructure/docker/controllers/dockermachine_controller_test.go +++ b/test/infrastructure/docker/controllers/dockermachine_controller_test.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4" "sigs.k8s.io/controller-runtime/pkg/client" @@ -42,17 +41,6 @@ var ( anotherMachine = newMachine(clusterName, "my-machine-1", anotherDockerMachine) ) -func setupScheme() *runtime.Scheme { - s := runtime.NewScheme() - if err := clusterv1.AddToScheme(s); err != nil { - panic(err) - } - if err := infrav1.AddToScheme(s); err != nil { - panic(err) - } - return s -} - func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) { g := NewWithT(t) @@ -64,7 +52,7 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) { // Intentionally omitted newMachine(clusterName, "my-machine-2", nil), } - c := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(objects...).Build() + c := fake.NewClientBuilder().WithObjects(objects...).Build() r := DockerMachineReconciler{ Client: c, } diff --git a/test/infrastructure/docker/controllers/suite_test.go b/test/infrastructure/docker/controllers/suite_test.go new file mode 100644 index 000000000000..085bbe7379e4 --- /dev/null +++ b/test/infrastructure/docker/controllers/suite_test.go @@ -0,0 +1,29 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4" +) + +func init() { + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) +} diff --git a/util/collections/machine_filters_test.go b/util/collections/machine_filters_test.go index 7dc45137ebeb..49c67de121f3 100644 --- a/util/collections/machine_filters_test.go +++ b/util/collections/machine_filters_test.go @@ -20,9 +20,7 @@ import ( "testing" "time" - "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/util/collections" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" . "github.com/onsi/gomega" @@ -33,10 +31,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ) -var ( - ctx = ctrl.SetupSignalHandler() -) - func falseFilter(_ *clusterv1.Machine) bool { return false } @@ -292,9 +286,6 @@ func TestMatchesKubernetesVersion(t *testing.T) { func TestGetFilteredMachinesForCluster(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Namespace: "my-namespace", @@ -303,7 +294,6 @@ func TestGetFilteredMachinesForCluster(t *testing.T) { } c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(cluster, testControlPlaneMachine("first-machine"), testMachine("second-machine"), diff --git a/util/collections/suite_test.go b/util/collections/suite_test.go new file mode 100644 index 000000000000..49fb7c7fa13a --- /dev/null +++ b/util/collections/suite_test.go @@ -0,0 +1,53 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collections_test + +import ( + "fmt" + "os" + "testing" + + "sigs.k8s.io/cluster-api/test/helpers" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + testEnv *helpers.TestEnvironment + ctx = ctrl.SetupSignalHandler() +) + +func TestMain(m *testing.M) { + // Bootstrapping test environment + testEnv = helpers.NewTestEnvironment() + go func() { + if err := testEnv.StartManager(ctx); err != nil { + panic(fmt.Sprintf("Failed to start the envtest manager: %v", err)) + } + }() + <-testEnv.Manager.Elected() + testEnv.WaitForWebhooks() + + // Run tests + code := m.Run() + // Tearing down the test environment + if err := testEnv.Stop(); err != nil { + panic(fmt.Sprintf("Failed to stop the envtest: %v", err)) + } + + // Report exit code + os.Exit(code) +} diff --git a/util/conditions/suite_test.go b/util/conditions/suite_test.go new file mode 100644 index 000000000000..bb2ad0cb5b35 --- /dev/null +++ b/util/conditions/suite_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conditions + +import ( + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" +) + +func init() { + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) +} diff --git a/util/conditions/unstructured_test.go b/util/conditions/unstructured_test.go index 5158bb37fb45..618022cf1240 100644 --- a/util/conditions/unstructured_test.go +++ b/util/conditions/unstructured_test.go @@ -23,36 +23,32 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ) func TestUnstructuredGetConditions(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - // GetConditions should return conditions from an unstructured object c := &clusterv1.Cluster{} c.SetConditions(conditionList(true1)) u := &unstructured.Unstructured{} - g.Expect(scheme.Convert(c, u, nil)).To(Succeed()) + g.Expect(scheme.Scheme.Convert(c, u, nil)).To(Succeed()) g.Expect(UnstructuredGetter(u).GetConditions()).To(haveSameConditionsOf(conditionList(true1))) // GetConditions should return nil for an unstructured object with empty conditions c = &clusterv1.Cluster{} u = &unstructured.Unstructured{} - g.Expect(scheme.Convert(c, u, nil)).To(Succeed()) + g.Expect(scheme.Scheme.Convert(c, u, nil)).To(Succeed()) g.Expect(UnstructuredGetter(u).GetConditions()).To(BeNil()) // GetConditions should return nil for an unstructured object without conditions e := &corev1.Endpoints{} u = &unstructured.Unstructured{} - g.Expect(scheme.Convert(e, u, nil)).To(Succeed()) + g.Expect(scheme.Scheme.Convert(e, u, nil)).To(Succeed()) g.Expect(UnstructuredGetter(u).GetConditions()).To(BeNil()) @@ -70,7 +66,7 @@ func TestUnstructuredGetConditions(t *testing.T) { }, }} u = &unstructured.Unstructured{} - g.Expect(scheme.Convert(p, u, nil)).To(Succeed()) + g.Expect(scheme.Scheme.Convert(p, u, nil)).To(Succeed()) g.Expect(UnstructuredGetter(u).GetConditions()).To(HaveLen(1)) } @@ -78,14 +74,9 @@ func TestUnstructuredGetConditions(t *testing.T) { func TestUnstructuredSetConditions(t *testing.T) { g := NewWithT(t) - // gets an unstructured with empty conditions - scheme := runtime.NewScheme() - g.Expect(corev1.AddToScheme(scheme)).To(Succeed()) - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - c := &clusterv1.Cluster{} u := &unstructured.Unstructured{} - g.Expect(scheme.Convert(c, u, nil)).To(Succeed()) + g.Expect(scheme.Scheme.Convert(c, u, nil)).To(Succeed()) // set conditions conditions := conditionList(true1, falseInfo1) diff --git a/util/conversion/conversion.go b/util/conversion/conversion.go index f2e6a64f311d..3ef3e5d199b2 100644 --- a/util/conversion/conversion.go +++ b/util/conversion/conversion.go @@ -37,6 +37,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/serializer" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" 
"k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util" @@ -173,6 +174,10 @@ type FuzzTestFuncInput struct { // FuzzTestFunc returns a new testing function to be used in tests to make sure conversions between // the Hub version of an object and an older version aren't lossy. func FuzzTestFunc(input FuzzTestFuncInput) func(*testing.T) { + if input.Scheme == nil { + input.Scheme = scheme.Scheme + } + return func(t *testing.T) { t.Run("spoke-hub-spoke", func(t *testing.T) { g := gomega.NewWithT(t) diff --git a/util/kubeconfig/kubeconfig_test.go b/util/kubeconfig/kubeconfig_test.go index cfedff437489..92063c74bd4f 100644 --- a/util/kubeconfig/kubeconfig_test.go +++ b/util/kubeconfig/kubeconfig_test.go @@ -29,7 +29,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/tools/clientcmd/api" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" @@ -78,14 +77,6 @@ users: } ) -func setupScheme() *runtime.Scheme { - scheme := runtime.NewScheme() - if err := corev1.AddToScheme(scheme); err != nil { - panic(err) - } - return scheme -} - func TestGetKubeConfigSecret(t *testing.T) { g := NewWithT(t) @@ -95,7 +86,7 @@ func TestGetKubeConfigSecret(t *testing.T) { } // creating a local copy to ensure validSecret.ObjectMeta.ResourceVersion does not get set by fakeClient validSec := validSecret.DeepCopy() - client := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(validSec).Build() + client := fake.NewClientBuilder().WithObjects(validSec).Build() found, err := FromSecret(ctx, client, clusterKey) g.Expect(err).NotTo(HaveOccurred()) @@ -258,7 +249,7 @@ func TestCreateSecretWithOwner(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(caSecret).Build() + c := fake.NewClientBuilder().WithObjects(caSecret).Build() owner := metav1.OwnerReference{ Name: "test1", @@ -313,7 +304,7 @@ func TestCreateSecret(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(caSecret).Build() + c := fake.NewClientBuilder().WithObjects(caSecret).Build() cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ @@ -408,7 +399,7 @@ func TestRegenerateClientCerts(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(setupScheme()).WithObjects(validSecret, caSecret).Build() + c := fake.NewClientBuilder().WithObjects(validSecret, caSecret).Build() oldConfig, err := clientcmd.Load(validSecret.Data[secret.KubeconfigDataName]) g.Expect(err).NotTo(HaveOccurred()) diff --git a/util/suite_test.go b/util/suite_test.go new file mode 100644 index 000000000000..21ef3db697e8 --- /dev/null +++ b/util/suite_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + ctrl "sigs.k8s.io/controller-runtime" +) + +var ( + ctx = ctrl.SetupSignalHandler() +) + +func init() { + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) +} diff --git a/util/util_test.go b/util/util_test.go index fd389597b2e6..d76265a5ac5c 100644 --- a/util/util_test.go +++ b/util/util_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - + "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,10 +35,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var ( - ctx = ctrl.SetupSignalHandler() -) - func TestMachineToInfrastructureMapFunc(t *testing.T) { g := NewWithT(t) @@ -396,9 +392,6 @@ func TestIsOwnedByObject(t *testing.T) { func TestGetOwnerClusterSuccessByName(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - myCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", @@ -407,7 +400,6 @@ func TestGetOwnerClusterSuccessByName(t *testing.T) { } c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(myCluster). Build() @@ -436,9 +428,6 @@ func TestGetOwnerClusterSuccessByName(t *testing.T) { func TestGetOwnerMachineSuccessByName(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - myMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", @@ -447,7 +436,6 @@ func TestGetOwnerMachineSuccessByName(t *testing.T) { } c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(myMachine). Build() @@ -470,9 +458,6 @@ func TestGetOwnerMachineSuccessByName(t *testing.T) { func TestGetOwnerMachineSuccessByNameFromDifferentVersion(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - myMachine := &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "my-machine", @@ -481,7 +466,6 @@ func TestGetOwnerMachineSuccessByNameFromDifferentVersion(t *testing.T) { } c := fake.NewClientBuilder(). - WithScheme(scheme). WithObjects(myMachine). 
Build() @@ -504,9 +488,6 @@ func TestGetOwnerMachineSuccessByNameFromDifferentVersion(t *testing.T) { func TestGetMachinesForCluster(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "my-cluster", @@ -544,7 +525,7 @@ func TestGetMachinesForCluster(t *testing.T) { }, } - c := fake.NewClientBuilder().WithScheme(scheme).WithObjects( + c := fake.NewClientBuilder().WithObjects( machine, machineDifferentClusterNameSameNamespace, machineSameClusterNameDifferentNamespace, @@ -662,9 +643,6 @@ func TestEnsureOwnerRef(t *testing.T) { func TestClusterToObjectsMapper(t *testing.T) { g := NewWithT(t) - scheme := runtime.NewScheme() - g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) - cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ Name: "test1", @@ -747,8 +725,8 @@ func TestClusterToObjectsMapper(t *testing.T) { for _, tc := range table { tc.objects = append(tc.objects, cluster) - client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.objects...).Build() - f, err := ClusterToObjectsMapper(client, tc.input, scheme) + client := fake.NewClientBuilder().WithObjects(tc.objects...).Build() + f, err := ClusterToObjectsMapper(client, tc.input, scheme.Scheme) g.Expect(err != nil, err).To(Equal(tc.expectError)) g.Expect(f(cluster)).To(ConsistOf(tc.output)) }
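
Taken together, these hunks converge on a single pattern: each test package registers its API types into client-go's shared scheme.Scheme exactly once, in an init() inside a new suite_test.go, and the fake clients (and FuzzTestFunc) then fall back to that global scheme instead of every test assembling a throwaway runtime.Scheme and asserting on AddToScheme. The following is a minimal sketch of that pattern in isolation; it is not part of the patch, and the package name and test name are hypothetical.

package examplepkg

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes/scheme"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// init registers the Cluster API types into client-go's global scheme once per
// package; utilruntime.Must panics on failure, so a bad registration surfaces
// immediately instead of as a repeated per-test Gomega assertion.
func init() {
	utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme))
}

func TestFakeClientUsesGlobalScheme(t *testing.T) {
	g := NewWithT(t)

	cluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"},
	}

	// No WithScheme call: controller-runtime's fake client builder defaults to
	// scheme.Scheme, which the init above has already populated.
	c := fake.NewClientBuilder().WithObjects(cluster).Build()

	got := &clusterv1.Cluster{}
	g.Expect(c.Get(context.Background(), client.ObjectKeyFromObject(cluster), got)).To(Succeed())
}

The util/conversion change applies the same fallback to fuzz tests: when FuzzTestFuncInput.Scheme is nil, FuzzTestFunc now substitutes scheme.Scheme, which is why the explicit Scheme fields and per-test scheme construction removed throughout this diff are no longer needed.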