diff --git a/test/e2e/aks.go b/test/e2e/aks.go
index f75970b36ec..52f573cac0d 100644
--- a/test/e2e/aks.go
+++ b/test/e2e/aks.go
@@ -30,10 +30,9 @@ import (
 	"github.com/pkg/errors"
 	"golang.org/x/mod/semver"
 	"k8s.io/apimachinery/pkg/types"
-	infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -126,8 +125,8 @@ type GetAzureManagedControlPlaneByClusterInput struct {
 // GetAzureManagedControlPlaneByCluster returns the AzureManagedControlPlane object for a cluster.
 // Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so
 // it is necessary to ensure this is already happened before calling it.
-func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infraexpv1.AzureManagedControlPlane {
-	controlPlaneList := &infraexpv1.AzureManagedControlPlaneList{}
+func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infrav1exp.AzureManagedControlPlane {
+	controlPlaneList := &infrav1exp.AzureManagedControlPlaneList{}
 	Expect(input.Lister.List(ctx, controlPlaneList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list AzureManagedControlPlane object for Cluster %s/%s", input.Namespace, input.ClusterName)
 	Expect(len(controlPlaneList.Items)).NotTo(BeNumerically(">", 1), "Cluster %s/%s should not have more than 1 AzureManagedControlPlane object", input.Namespace, input.ClusterName)
 	if len(controlPlaneList.Items) == 1 {
@@ -140,7 +139,7 @@ func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureMan
 type WaitForControlPlaneAndMachinesReadyInput struct {
 	Lister       framework.Lister
 	Getter       framework.Getter
-	ControlPlane *infraexpv1.AzureManagedControlPlane
+	ControlPlane *infrav1exp.AzureManagedControlPlane
 	ClusterName  string
 	Namespace    string
 }
@@ -166,7 +165,7 @@ const (
 )
 
 // value returns the integer equivalent of controlPlaneReplicas
-func (r controlPlaneReplicas) value(mp *clusterv1exp.MachinePool) int {
+func (r controlPlaneReplicas) value(mp *expv1.MachinePool) int {
 	switch r {
 	case atLeastOne:
 		return 1
@@ -179,7 +178,6 @@ func (r controlPlaneReplicas) value(mp *clusterv1exp.MachinePool) int {
 // WaitForAKSSystemNodePoolMachinesToExist waits for a certain number of machines in the "system" node pool to exist.
 func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, minReplicas controlPlaneReplicas, intervals ...interface{}) {
 	Eventually(func() bool {
-
 		opt1 := client.InNamespace(input.Namespace)
 		opt2 := client.MatchingLabels(map[string]string{
 			infrav1exp.LabelAgentPoolMode: string(infrav1exp.NodePoolModeSystem),
@@ -200,7 +198,7 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC
 				continue
 			}
 
-			ownerMachinePool := &clusterv1exp.MachinePool{}
+			ownerMachinePool := &expv1.MachinePool{}
 			if err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: ref.Name},
 				ownerMachinePool); err != nil {
 				LogWarningf("Failed to get machinePool: %+v", err)
@@ -213,7 +211,6 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC
 		}
 
 		return false
-
 	}, intervals...).Should(Equal(true), "System machine pools not detected")
 }
diff --git a/test/e2e/azure_clusterproxy.go b/test/e2e/azure_clusterproxy.go
index 0394e7c639d..0684a328c4a 100644
--- a/test/e2e/azure_clusterproxy.go
+++ b/test/e2e/azure_clusterproxy.go
@@ -42,8 +42,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
-	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -69,8 +69,8 @@ func initScheme() *runtime.Scheme {
 	scheme := runtime.NewScheme()
 	framework.TryAddDefaultSchemes(scheme)
 	Expect(infrav1.AddToScheme(scheme)).To(Succeed())
+	Expect(infrav1exp.AddToScheme(scheme)).To(Succeed())
 	Expect(expv1.AddToScheme(scheme)).To(Succeed())
-	Expect(clusterv1exp.AddToScheme(scheme)).To(Succeed())
 	// Add aadpodidentity v1 to the scheme.
 	aadPodIdentityGroupVersion := schema.GroupVersion{Group: aadpodv1.GroupName, Version: "v1"}
 	scheme.AddKnownTypes(aadPodIdentityGroupVersion,
diff --git a/test/e2e/azure_csidriver.go b/test/e2e/azure_csidriver.go
index cd2f0027287..4794e77cecb 100644
--- a/test/e2e/azure_csidriver.go
+++ b/test/e2e/azure_csidriver.go
@@ -41,8 +41,8 @@ type AzureDiskCSISpecInput struct {
 
 // AzureDiskCSISpec implements a test that verifies out of tree azure disk csi driver
 // can be used to create a PVC that is usable by a pod.
-func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecInput){
-	specName := "azurediskcsi-driver"
+func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecInput) {
+	specName := "azurediskcsi-driver"
 	input := inputGetter()
 	Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)
@@ -56,7 +56,7 @@ func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecIn
 	e2e_sc.Create("managedhdd").WithWaitForFirstConsumer().DeployStorageClass(clientset)
 
 	By("Deploying persistent volume claim")
-	b,err := e2e_pvc.Create("dd-managed-hdd-5g", "5Gi")
+	b, err := e2e_pvc.Create("dd-managed-hdd-5g", "5Gi")
 	Expect(err).To(BeNil())
 	b.DeployPVC(clientset)
diff --git a/test/e2e/azure_lb.go b/test/e2e/azure_lb.go
index 21c2d707441..df1c03a69b8 100644
--- a/test/e2e/azure_lb.go
+++ b/test/e2e/azure_lb.go
@@ -25,23 +25,20 @@ import (
 	"net"
 	"time"
 
-	"sigs.k8s.io/cluster-api/util"
-
+	"github.com/hashicorp/go-retryablehttp"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	k8snet "k8s.io/utils/net"
-
-	"github.com/hashicorp/go-retryablehttp"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
-	"sigs.k8s.io/cluster-api/test/framework"
-
+	k8snet "k8s.io/utils/net"
 	deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/job"
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/windows"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/util"
 )
 
 // AzureLBSpecInput is the input for AzureLBSpec.
diff --git a/test/e2e/azure_logcollector.go b/test/e2e/azure_logcollector.go
index 147200cceca..b1f14e09cca 100644
--- a/test/e2e/azure_logcollector.go
+++ b/test/e2e/azure_logcollector.go
@@ -27,20 +27,17 @@ import (
 	"strings"
 	"time"
 
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-
-	expv1alpha4 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
-	"sigs.k8s.io/cluster-api/test/framework"
-
-	"sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
-	"sigs.k8s.io/cluster-api-provider-azure/azure"
-
 	"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute"
 	autorest "github.com/Azure/go-autorest/autorest/azure"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
 	"github.com/pkg/errors"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	"sigs.k8s.io/cluster-api-provider-azure/azure"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	kinderrors "sigs.k8s.io/kind/pkg/errors"
@@ -176,46 +173,46 @@ func getHostname(m *clusterv1.Machine, isWindows bool) string {
 	return hostname
 }
 
-func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*v1beta1.AzureCluster, error) {
+func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureCluster, error) {
 	key := client.ObjectKey{
 		Namespace: namespace,
 		Name:      name,
 	}
 
-	azCluster := &v1beta1.AzureCluster{}
+	azCluster := &infrav1.AzureCluster{}
 	err := managementClusterClient.Get(context.TODO(), key, azCluster)
 	return azCluster, err
 }
 
-func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*v1beta1.AzureMachine, error) {
+func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
 	key := client.ObjectKey{
 		Namespace: m.Spec.InfrastructureRef.Namespace,
 		Name:      m.Spec.InfrastructureRef.Name,
 	}
 
-	azMachine := &v1beta1.AzureMachine{}
+	azMachine := &infrav1.AzureMachine{}
 	err := managementClusterClient.Get(ctx, key, azMachine)
 	return azMachine, err
 }
 
-func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*expv1alpha4.AzureMachinePool, error) {
+func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) {
 	key := client.ObjectKey{
 		Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
 		Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
 	}
 
-	azMachinePool := &expv1alpha4.AzureMachinePool{}
+	azMachinePool := &infrav1exp.AzureMachinePool{}
 	err := managementClusterClient.Get(ctx, key, azMachinePool)
 	return azMachinePool, err
 }
 
-func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*expv1alpha4.AzureManagedMachinePool, error) {
+func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureManagedMachinePool, error) {
 	key := client.ObjectKey{
 		Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
 		Name:      mp.Spec.Template.Spec.InfrastructureRef.Name,
 	}
 
-	azManagedMachinePool := &expv1alpha4.AzureManagedMachinePool{}
+	azManagedMachinePool := &infrav1exp.AzureManagedMachinePool{}
 	err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
 	return azManagedMachinePool, err
 }
@@ -357,7 +354,7 @@ func windowsNetworkLogs(execToPathFn func(outputFileName string, command string,
 }
 
 // collectVMBootLog collects boot logs of the vm by using azure boot diagnostics.
-func collectVMBootLog(ctx context.Context, am *v1beta1.AzureMachine, outputPath string) error {
+func collectVMBootLog(ctx context.Context, am *infrav1.AzureMachine, outputPath string) error {
 	Logf("Collecting boot logs for AzureMachine %s\n", am.GetName())
 
 	if am == nil || am.Spec.ProviderID == nil {
diff --git a/test/e2e/azure_machinepool_drain.go b/test/e2e/azure_machinepool_drain.go
index 4279560e8c0..3a51db460a9 100644
--- a/test/e2e/azure_machinepool_drain.go
+++ b/test/e2e/azure_machinepool_drain.go
@@ -33,13 +33,12 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
-	"sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	deployments "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/windows"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -87,19 +86,18 @@ func AzureMachinePoolDrainSpec(ctx context.Context, inputGetter func() AzureMach
 	Expect(clientset).NotTo(BeNil())
 
 	By(fmt.Sprintf("listing AzureMachinePools in the cluster in namespace %s", input.Namespace.Name))
-	ampList := &v1beta1.AzureMachinePoolList{}
+	ampList := &infrav1exp.AzureMachinePoolList{}
 	Expect(bootstrapClusterProxy.GetClient().List(ctx, ampList, client.InNamespace(input.Namespace.Name), client.MatchingLabels(labels))).To(Succeed())
 	for _, amp := range ampList.Items {
 		testMachinePoolCordonAndDrain(ctx, bootstrapClusterProxy, workloadClusterProxy, amp)
 	}
-
 }
 
-func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, workloadClusterProxy framework.ClusterProxy, amp v1beta1.AzureMachinePool) {
+func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, workloadClusterProxy framework.ClusterProxy, amp infrav1exp.AzureMachinePool) {
 	var (
 		isWindows         = amp.Spec.Template.OSDisk.OSType == azure.WindowsOS
 		clientset         = workloadClusterProxy.GetClientSet()
-		owningMachinePool = func() *clusterv1exp.MachinePool {
+		owningMachinePool = func() *expv1.MachinePool {
 			mp, err := getOwnerMachinePool(ctx, mgmtClusterProxy.GetClient(), amp.ObjectMeta)
 			Expect(err).NotTo(HaveOccurred())
 			return mp
@@ -169,7 +167,6 @@ func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, worklo
 
 	// TODO setup a watcher to validate expected 2nd order drain outcomes
 	// https://github.com/kubernetes-sigs/cluster-api-provider-azure/issues/2159
-
 }
 
 func labelNodesWithMachinePoolName(ctx context.Context, workloadClient client.Client, mpName string, ampms []infrav1exp.AzureMachinePoolMachine) {
@@ -211,15 +208,15 @@ func getAzureMachinePoolMachines(ctx context.Context, mgmtClusterProxy, workload
 }
 
 // getOwnerMachinePool returns the name of MachinePool object owning the current resource.
-func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1exp.MachinePool, error) {
+func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) {
 	for _, ref := range obj.OwnerReferences {
 		gv, err := schema.ParseGroupVersion(ref.APIVersion)
 		if err != nil {
 			return nil, err
 		}
-		if ref.Kind == "MachinePool" && gv.Group == clusterv1exp.GroupVersion.Group {
-			mp := &clusterv1exp.MachinePool{}
+		if ref.Kind == "MachinePool" && gv.Group == expv1.GroupVersion.Group {
+			mp := &expv1.MachinePool{}
 			Eventually(func() error {
 				err := c.Get(ctx, client.ObjectKey{
 					Name:      ref.Name,
diff --git a/test/e2e/azure_net_pol.go b/test/e2e/azure_net_pol.go
index bfe537b1e7b..8bcb78346ac 100644
--- a/test/e2e/azure_net_pol.go
+++ b/test/e2e/azure_net_pol.go
@@ -29,14 +29,12 @@ import (
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-
-	deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
-	e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
-	e2e_networkpolicy "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/networkpolicy"
-
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/rest"
+	deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
+	e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
+	e2e_networkpolicy "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/networkpolicy"
 	"sigs.k8s.io/cluster-api/test/framework"
 )
@@ -264,5 +262,4 @@ func AzureNetPolSpec(ctx context.Context, inputGetter func() AzureNetPolSpecInpu
 
 	By("Ensuring we have ingress access from role:frontend pods in development namespace")
 	e2e_networkpolicy.EnsureConnectivityResultBetweenPods(clientset, config, frontendDevPods, backendPods, true)
-
 }
diff --git a/test/e2e/azure_selfhosted.go b/test/e2e/azure_selfhosted.go
index 4ce233acace..bcb2b9362b2 100644
--- a/test/e2e/azure_selfhosted.go
+++ b/test/e2e/azure_selfhosted.go
@@ -27,8 +27,6 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	"sigs.k8s.io/controller-runtime/pkg/client"
-
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/utils/pointer"
@@ -38,6 +36,7 @@ import (
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/util"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
 // SelfHostedSpecInput is the input for SelfHostedSpec.
diff --git a/test/e2e/azure_test.go b/test/e2e/azure_test.go
index 8bbdef207f3..e73712b7779 100644
--- a/test/e2e/azure_test.go
+++ b/test/e2e/azure_test.go
@@ -26,18 +26,17 @@ import (
 	"path/filepath"
 	"time"
 
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"sigs.k8s.io/cluster-api/util"
-
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
 	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
+	"sigs.k8s.io/cluster-api/util"
 )
 
 var _ = Describe("Workload cluster creation", func() {
diff --git a/test/e2e/common.go b/test/e2e/common.go
index 6037616b6bb..d30c4f0e833 100644
--- a/test/e2e/common.go
+++ b/test/e2e/common.go
@@ -249,7 +249,6 @@ func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCl
 	controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
 	InstallAzureDiskCSIDriverHelmChart(ctx, input)
 	result.ControlPlane = controlPlane
-
 }
 
 func discoveryAndWaitForControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) *kubeadmv1.KubeadmControlPlane {
diff --git a/test/e2e/conformance_test.go b/test/e2e/conformance_test.go
index 32be44e43e5..b82d2d180e1 100644
--- a/test/e2e/conformance_test.go
+++ b/test/e2e/conformance_test.go
@@ -28,17 +28,13 @@ import (
 	"strings"
 
 	"github.com/blang/semver"
-
-	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
-
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/utils/pointer"
+	"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
 	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/test/framework/kubetest"
@@ -189,7 +185,7 @@ var _ = Describe("Conformance Tests", func() {
 
 		if isWindows(kubetestConfigFilePath) {
 			// Windows requires a taint on control nodes nodes since not all conformance tests have ability to run
-			options := v1.ListOptions{
+			options := metav1.ListOptions{
 				LabelSelector: "kubernetes.io/os=linux",
 			}
 
diff --git a/test/e2e/helpers.go b/test/e2e/helpers.go
index 7bfc631125d..cdd2dcec783 100644
--- a/test/e2e/helpers.go
+++ b/test/e2e/helpers.go
@@ -36,8 +36,6 @@ import (
 	"text/tabwriter"
 	"time"
 
-	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
-
 	"github.com/Azure/azure-sdk-for-go/profiles/2020-09-01/compute/mgmt/compute"
 	"github.com/Azure/go-autorest/autorest/azure/auth"
 	"github.com/blang/semver"
@@ -46,6 +44,11 @@ import (
 	. "github.com/onsi/gomega"
 	"github.com/pkg/errors"
 	"golang.org/x/crypto/ssh"
+	helmAction "helm.sh/helm/v3/pkg/action"
+	helmLoader "helm.sh/helm/v3/pkg/chart/loader"
+	helmCli "helm.sh/helm/v3/pkg/cli"
+	helmVals "helm.sh/helm/v3/pkg/cli/values"
+	helmGetter "helm.sh/helm/v3/pkg/getter"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -55,23 +58,18 @@ import (
 	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	typedbatchv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
 	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
-	"sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
+	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-azure/azure"
-	expv1beta1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
+	infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/noderefutil"
-	clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
+	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/test/framework/kubernetesversions"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-
-	helmAction "helm.sh/helm/v3/pkg/action"
-	helmLoader "helm.sh/helm/v3/pkg/chart/loader"
-	helmCli "helm.sh/helm/v3/pkg/cli"
-	helmVals "helm.sh/helm/v3/pkg/cli/values"
-	helmGetter "helm.sh/helm/v3/pkg/getter"
 )
 
 const (
@@ -526,12 +524,12 @@ func getMachinesInCluster(ctx context.Context, c framework.Lister, namespace, na
 }
 
 // getMachinePoolsInCluster returns a list of all machine pools in the given cluster.
-func getMachinePoolsInCluster(ctx context.Context, c framework.Lister, namespace, name string) (*clusterv1exp.MachinePoolList, error) {
+func getMachinePoolsInCluster(ctx context.Context, c framework.Lister, namespace, name string) (*expv1.MachinePoolList, error) {
 	if name == "" {
 		return nil, nil
 	}
 
-	machinePoolList := &clusterv1exp.MachinePoolList{}
+	machinePoolList := &expv1.MachinePoolList{}
 	labels := map[string]string{clusterv1.ClusterLabelName: name}
 
 	if err := c.List(ctx, machinePoolList, client.InNamespace(namespace), client.MatchingLabels(labels)); err != nil {
@@ -541,11 +539,11 @@ func getMachinePoolsInCluster(ctx context.Context, c framework.Lister, namespace
 	return machinePoolList, nil
 }
 
-func isAzureMachineWindows(am *v1beta1.AzureMachine) bool {
+func isAzureMachineWindows(am *infrav1.AzureMachine) bool {
 	return am.Spec.OSDisk.OSType == azure.WindowsOS
 }
 
-func isAzureMachinePoolWindows(amp *expv1beta1.AzureMachinePool) bool {
+func isAzureMachinePoolWindows(amp *infrav1exp.AzureMachinePool) bool {
 	return amp.Spec.Template.OSDisk.OSType == azure.WindowsOS
 }
 
diff --git a/test/e2e/kubernetes/deployment/deployment.go b/test/e2e/kubernetes/deployment/deployment.go
index 38de238874d..6bc717ae140 100644
--- a/test/e2e/kubernetes/deployment/deployment.go
+++ b/test/e2e/kubernetes/deployment/deployment.go
@@ -25,9 +25,6 @@ import (
 	"log"
 	"time"
 
-	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-
 	. "github.com/onsi/gomega"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
@@ -35,7 +32,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/client-go/kubernetes"
+	typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
 	"k8s.io/utils/pointer"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 )
 
 const (
@@ -132,11 +131,11 @@ func (d *Builder) AddContainerPort(name, portName string, portNumber int32, prot
 	}
 }
 
-func (d *Builder)AddPVC(pvcName string) *Builder {
-	volumes:= []corev1.Volume{
+func (d *Builder) AddPVC(pvcName string) *Builder {
+	volumes := []corev1.Volume{
 		{
 			Name: "managed",
-			VolumeSource:corev1.VolumeSource{
+			VolumeSource: corev1.VolumeSource{
 				PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
 					ClaimName: pvcName,
 				},
diff --git a/test/e2e/kubernetes/networkpolicy/networkpolicy.go b/test/e2e/kubernetes/networkpolicy/networkpolicy.go
index 94ce73b058b..829065cf3a1 100644
--- a/test/e2e/kubernetes/networkpolicy/networkpolicy.go
+++ b/test/e2e/kubernetes/networkpolicy/networkpolicy.go
@@ -28,15 +28,13 @@ import (
 	"time"
 
 	. "github.com/onsi/gomega"
-
-	e2e_pod "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/pod"
-
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubectl/pkg/scheme"
+	e2e_pod "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/pod"
 )
 
 const (
@@ -93,13 +91,13 @@ func DeleteNetworkPolicy(ctx context.Context, clientset *kubernetes.Clientset, n
 	}, networkPolicyOperationTimeout, networkPolicyOperationSleepBetweenRetries).Should(Succeed())
 }
 
-func EnsureOutboundInternetAccess(clientset *kubernetes.Clientset, config *restclient.Config, pods []v1.Pod) {
+func EnsureOutboundInternetAccess(clientset *kubernetes.Clientset, config *restclient.Config, pods []corev1.Pod) {
 	for _, pod := range pods {
 		CheckOutboundConnection(clientset, config, pod)
 	}
 }
 
-func EnsureConnectivityResultBetweenPods(clientset *kubernetes.Clientset, config *restclient.Config, fromPods []v1.Pod, toPods []v1.Pod, shouldHaveConnection bool) {
+func EnsureConnectivityResultBetweenPods(clientset *kubernetes.Clientset, config *restclient.Config, fromPods []corev1.Pod, toPods []corev1.Pod, shouldHaveConnection bool) {
 	for _, fromPod := range fromPods {
 		for _, toPod := range toPods {
 			command := []string{"curl", "-S", "-s", "-o", "/dev/null", toPod.Status.PodIP}
@@ -108,7 +106,7 @@ func EnsureConnectivityResultBetweenPods(clientset *kubernetes.Clientset, config
 	}
 }
 
-func CheckOutboundConnection(clientset *kubernetes.Clientset, config *restclient.Config, pod v1.Pod) {
+func CheckOutboundConnection(clientset *kubernetes.Clientset, config *restclient.Config, pod corev1.Pod) {
 	command := []string{"curl", "-S", "-s", "-o", "/dev/null", "www.bing.com"}
 	e2e_pod.Exec(clientset, config, pod, command, true)
 }
diff --git a/test/e2e/kubernetes/pod/pod.go b/test/e2e/kubernetes/pod/pod.go
index e4441c0e6bb..6c33012ce1b 100644
--- a/test/e2e/kubernetes/pod/pod.go
+++ b/test/e2e/kubernetes/pod/pod.go
@@ -25,7 +25,7 @@ import (
 	"time"
 
 	. "github.com/onsi/gomega"
-	v1 "k8s.io/api/core/v1"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/remotecommand"
@@ -37,10 +37,10 @@ const (
 	podExecOperationSleepBetweenRetries = 3 * time.Second
 )
 
-func Exec(clientset *kubernetes.Clientset, config *restclient.Config, pod v1.Pod, command []string, testSuccess bool) error {
+func Exec(clientset *kubernetes.Clientset, config *restclient.Config, pod corev1.Pod, command []string, testSuccess bool) error {
 	req := clientset.CoreV1().RESTClient().Post().Resource("pods").Name(pod.GetName()).
 		Namespace(pod.GetNamespace()).SubResource("exec")
-	option := &v1.PodExecOptions{
+	option := &corev1.PodExecOptions{
 		Command: command,
 		Stdin:   false,
 		Stdout:  true,
diff --git a/test/e2e/kubernetes/pvc/pvc.go b/test/e2e/kubernetes/pvc/pvc.go
index 27b4ed6cbb0..355631b55f4 100644
--- a/test/e2e/kubernetes/pvc/pvc.go
+++ b/test/e2e/kubernetes/pvc/pvc.go
@@ -49,23 +49,23 @@ spec:
   resources:
     requests:
       storage: 5Gi
- */
+*/
 
 type Builder struct {
 	pvc *corev1.PersistentVolumeClaim
 }
 
-func Create(pvcName string, storageRequest string) (*Builder,error) {
-	qunatity,err:= resource.ParseQuantity("5Gi")
-	if err!=nil{
-		return nil,err
+func Create(pvcName string, storageRequest string) (*Builder, error) {
+	qunatity, err := resource.ParseQuantity("5Gi")
+	if err != nil {
+		return nil, err
 	}
-	pvcBuilder:=&Builder{
+	pvcBuilder := &Builder{
 		pvc: &corev1.PersistentVolumeClaim{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "dd-managed-hdd-5g",
 				Annotations: map[string]string{
-					"volume.beta.kubernetes.io/storage-class":"managedhdd",
+					"volume.beta.kubernetes.io/storage-class": "managedhdd",
 				},
 			},
 			Spec: corev1.PersistentVolumeClaimSpec{
@@ -74,18 +74,18 @@ func Create(pvcName string, storageRequest string) (*Builder,error) {
 			},
 			Resources: corev1.ResourceRequirements{
 				Requests: map[corev1.ResourceName]resource.Quantity{
-					corev1.ResourceStorage : qunatity,
+					corev1.ResourceStorage: qunatity,
 				},
 			},
 		},
 	}
-	return pvcBuilder,nil
+	return pvcBuilder, nil
 }
 
-func (b *Builder)DeployPVC(clientset *kubernetes.Clientset) error {
+func (b *Builder) DeployPVC(clientset *kubernetes.Clientset) error {
 	Eventually(func() error {
-		_,err := clientset.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(),b.pvc,metav1.CreateOptions{})
+		_, err := clientset.CoreV1().PersistentVolumeClaims("default").Create(context.TODO(), b.pvc, metav1.CreateOptions{})
 		if err != nil {
 			log.Printf("Error trying to deploy storage class %s in namespace %s:%s\n", b.pvc.Name, b.pvc.ObjectMeta.Namespace, err.Error())
 			return err
 		}
@@ -94,4 +94,4 @@ func (b *Builder)DeployPVC(clientset *kubernetes.Clientset) error {
 	}, pvcOperationTimeout, pvcOperationSleepBetweenRetries).Should(Succeed())
 
 	return nil
-}
\ No newline at end of file
+}
diff --git a/test/e2e/kubernetes/storageclass/storageclass.go b/test/e2e/kubernetes/storageclass/storageclass.go
index 0665166e924..f025bed9d0d 100644
--- a/test/e2e/kubernetes/storageclass/storageclass.go
+++ b/test/e2e/kubernetes/storageclass/storageclass.go
@@ -40,12 +40,12 @@ volumeBindingMode: WaitForFirstConsumer
 parameters:
   storageaccounttype: Standard_LRS
   kind: Managed
- */
+*/
 
 const (
 	scOperationTimeout             = 30 * time.Second
 	scOperationSleepBetweenRetries = 3 * time.Second
-	AzureDiskProvisioner = "kubernetes.io/azure-disk"
+	AzureDiskProvisioner           = "kubernetes.io/azure-disk"
 )
 
 // Builder provides a helper interface for building storage class manifest
@@ -55,15 +55,15 @@ type Builder struct {
 
 // Create creates a storage class builder manifest
 func Create(scName string) *Builder {
-	scBuilder:= &Builder{
+	scBuilder := &Builder{
 		sc: &storagev1.StorageClass{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: scName,
 			},
 			Provisioner: AzureDiskProvisioner,
 			Parameters: map[string]string{
-				"storageaccounttype":"Standard_LRS",
-				"kind": "managed",
+				"storageaccounttype": "Standard_LRS",
+				"kind":               "managed",
 			},
 		},
 	}
@@ -71,16 +71,16 @@ func Create(scName string) *Builder {
 }
 
 // WithWaitForFirstConsumer sets volume binding on first consumer
-func (d *Builder)WithWaitForFirstConsumer() *Builder {
-	volumeBinding:= storagev1.VolumeBindingWaitForFirstConsumer
+func (d *Builder) WithWaitForFirstConsumer() *Builder {
+	volumeBinding := storagev1.VolumeBindingWaitForFirstConsumer
 	d.sc.VolumeBindingMode = &volumeBinding
 	return d
 }
 
 // DeployStorageClass creates a storage class on the k8s cluster
-func (d *Builder)DeployStorageClass(clientset *kubernetes.Clientset) {
+func (d *Builder) DeployStorageClass(clientset *kubernetes.Clientset) {
 	Eventually(func() error {
-		_,err := clientset.StorageV1().StorageClasses().Create(context.TODO(),d.sc,metav1.CreateOptions{})
+		_, err := clientset.StorageV1().StorageClasses().Create(context.TODO(), d.sc, metav1.CreateOptions{})
 		if err != nil {
 			log.Printf("Error trying to deploy storage class %s in namespace %s:%s\n", d.sc.Name, d.sc.ObjectMeta.Namespace, err.Error())
 			return err
diff --git a/test/logger.go b/test/logger.go
index 8e825a92e3f..e7ffe1bb75b 100644
--- a/test/logger.go
+++ b/test/logger.go
@@ -28,7 +28,6 @@ import (
 	"path/filepath"
 
 	. "github.com/onsi/gomega"
-
 	"sigs.k8s.io/cluster-api-provider-azure/test/e2e"
 	"sigs.k8s.io/cluster-api/test/framework"
 )