Standardize go imports in test/e2e files #2574

Merged
1 commit merged on Aug 24, 2022
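For reference while reading the diffs below, this is the aliasing convention the PR applies. A minimal sketch assembled from the aliases in the changed files (the single consolidated import block is illustrative; each file keeps its own import set):

import (
    // CAPZ (cluster-api-provider-azure) APIs
    infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    // upstream CAPI (cluster-api) APIs
    clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

In short: the infra-prefixed aliases name the CAPZ packages and the unprefixed clusterv1/expv1 aliases name the upstream CAPI packages, replacing ad-hoc names such as infraexpv1, clusterv1exp, and expv1alpha4 (which, despite its name, aliased a v1beta1 package in azure_logcollector.go).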
15 changes: 6 additions & 9 deletions test/e2e/aks.go
@@ -30,10 +30,9 @@ import (
"github.com/pkg/errors"
"golang.org/x/mod/semver"
"k8s.io/apimachinery/pkg/types"
infraexpv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -126,8 +125,8 @@ type GetAzureManagedControlPlaneByClusterInput struct {
// GetAzureManagedControlPlaneByCluster returns the AzureManagedControlPlane object for a cluster.
// Important! this method relies on labels that are created by the CAPI controllers during the first reconciliation, so
// it is necessary to ensure this is already happened before calling it.
func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infraexpv1.AzureManagedControlPlane {
controlPlaneList := &infraexpv1.AzureManagedControlPlaneList{}
func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureManagedControlPlaneByClusterInput) *infrav1exp.AzureManagedControlPlane {
controlPlaneList := &infrav1exp.AzureManagedControlPlaneList{}
Expect(input.Lister.List(ctx, controlPlaneList, byClusterOptions(input.ClusterName, input.Namespace)...)).To(Succeed(), "Failed to list AzureManagedControlPlane object for Cluster %s/%s", input.Namespace, input.ClusterName)
Expect(len(controlPlaneList.Items)).NotTo(BeNumerically(">", 1), "Cluster %s/%s should not have more than 1 AzureManagedControlPlane object", input.Namespace, input.ClusterName)
if len(controlPlaneList.Items) == 1 {
@@ -140,7 +139,7 @@ func GetAzureManagedControlPlaneByCluster(ctx context.Context, input GetAzureMan
type WaitForControlPlaneAndMachinesReadyInput struct {
Lister framework.Lister
Getter framework.Getter
ControlPlane *infraexpv1.AzureManagedControlPlane
ControlPlane *infrav1exp.AzureManagedControlPlane
ClusterName string
Namespace string
}
@@ -166,7 +165,7 @@ const (
)

// value returns the integer equivalent of controlPlaneReplicas
func (r controlPlaneReplicas) value(mp *clusterv1exp.MachinePool) int {
func (r controlPlaneReplicas) value(mp *expv1.MachinePool) int {
switch r {
case atLeastOne:
return 1
@@ -179,7 +178,6 @@ func (r controlPlaneReplicas) value(mp *clusterv1exp.MachinePool) int {
// WaitForAKSSystemNodePoolMachinesToExist waits for a certain number of machines in the "system" node pool to exist.
func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, minReplicas controlPlaneReplicas, intervals ...interface{}) {
Eventually(func() bool {

opt1 := client.InNamespace(input.Namespace)
opt2 := client.MatchingLabels(map[string]string{
infrav1exp.LabelAgentPoolMode: string(infrav1exp.NodePoolModeSystem),
@@ -200,7 +198,7 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC
continue
}

ownerMachinePool := &clusterv1exp.MachinePool{}
ownerMachinePool := &expv1.MachinePool{}
if err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: ref.Name},
ownerMachinePool); err != nil {
LogWarningf("Failed to get machinePool: %+v", err)
@@ -213,7 +211,6 @@ func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForC
}

return false

}, intervals...).Should(Equal(true), "System machine pools not detected")
}

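One motivation is visible in aks.go above: both modules expose an exp/api/v1beta1 package, so a single file often imports both and must alias them distinctly. A self-contained sketch under the same imports (describePool is a hypothetical helper, not part of the PR):

package e2e

import (
    "fmt"

    infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// describePool shows the two experimental APIs side by side: infrav1exp names
// CAPZ's AzureMachinePool, expv1 names CAPI's MachinePool. Both packages are
// called v1beta1, so distinct aliases keep the references unambiguous.
func describePool(amp *infrav1exp.AzureMachinePool, mp *expv1.MachinePool) string {
    return fmt.Sprintf("AzureMachinePool %s/%s is owned by MachinePool %s/%s",
        amp.Namespace, amp.Name, mp.Namespace, mp.Name)
}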
6 changes: 3 additions & 3 deletions test/e2e/azure_clusterproxy.go
@@ -43,8 +43,8 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -70,8 +70,8 @@ func initScheme() *runtime.Scheme {
scheme := runtime.NewScheme()
framework.TryAddDefaultSchemes(scheme)
Expect(infrav1.AddToScheme(scheme)).To(Succeed())
Expect(infrav1exp.AddToScheme(scheme)).To(Succeed())
Expect(expv1.AddToScheme(scheme)).To(Succeed())
Expect(clusterv1exp.AddToScheme(scheme)).To(Succeed())
// Add aadpodidentity v1 to the scheme.
aadPodIdentityGroupVersion := schema.GroupVersion{Group: aadpodv1.GroupName, Version: "v1"}
scheme.AddKnownTypes(aadPodIdentityGroupVersion,
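The initScheme change above is the same rename seen from the caller's side: both experimental packages export an AddToScheme function and both have the package name v1beta1, so at least one needs an explicit alias before the registrations can live in one file. A condensed sketch of that registration, assuming the same packages (buildScheme is a hypothetical stand-in for initScheme, without the aadpodidentity additions):

package e2e

import (
    "k8s.io/apimachinery/pkg/runtime"
    infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
    expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
)

// buildScheme registers the CAPZ core, CAPZ experimental, and CAPI experimental
// types; the standardized aliases keep the three AddToScheme calls unambiguous.
func buildScheme() (*runtime.Scheme, error) {
    scheme := runtime.NewScheme()
    for _, add := range []func(*runtime.Scheme) error{
        infrav1.AddToScheme,
        infrav1exp.AddToScheme,
        expv1.AddToScheme,
    } {
        if err := add(scheme); err != nil {
            return nil, err
        }
    }
    return scheme, nil
}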
6 changes: 3 additions & 3 deletions test/e2e/azure_csidriver.go
@@ -41,8 +41,8 @@ type AzureDiskCSISpecInput struct {

// AzureDiskCSISpec implements a test that verifies out of tree azure disk csi driver
// can be used to create a PVC that is usable by a pod.
func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecInput){
specName := "azurediskcsi-driver"
func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecInput) {
specName := "azurediskcsi-driver"
input := inputGetter()
Expect(input.BootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. input.BootstrapClusterProxy can't be nil when calling %s spec", specName)

@@ -56,7 +56,7 @@ func AzureDiskCSISpec(ctx context.Context, inputGetter func() AzureDiskCSISpecIn
e2e_sc.Create("managedhdd").WithWaitForFirstConsumer().DeployStorageClass(clientset)

By("Deploying persistent volume claim")
b,err := e2e_pvc.Create("dd-managed-hdd-5g", "5Gi")
b, err := e2e_pvc.Create("dd-managed-hdd-5g", "5Gi")
Expect(err).To(BeNil())
b.DeployPVC(clientset)

11 changes: 4 additions & 7 deletions test/e2e/azure_lb.go
@@ -25,23 +25,20 @@ import (
"net"
"time"

"sigs.k8s.io/cluster-api/util"

"github.com/hashicorp/go-retryablehttp"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
k8snet "k8s.io/utils/net"

"github.com/hashicorp/go-retryablehttp"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/cluster-api/test/framework"

k8snet "k8s.io/utils/net"
deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/job"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/windows"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util"
)

// AzureLBSpecInput is the input for AzureLBSpec.
37 changes: 17 additions & 20 deletions test/e2e/azure_logcollector.go
@@ -27,20 +27,17 @@ import (
"strings"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"

expv1alpha4 "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"

"sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"

"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute"
autorest "github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/azure/auth"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
"sigs.k8s.io/cluster-api-provider-azure/azure"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/client"
kinderrors "sigs.k8s.io/kind/pkg/errors"
@@ -179,57 +176,57 @@ func getHostname(m *clusterv1.Machine, isWindows bool) string {
return hostname
}

func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*v1beta1.AzureCluster, error) {
func getAzureCluster(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1.AzureCluster, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}

azCluster := &v1beta1.AzureCluster{}
azCluster := &infrav1.AzureCluster{}
err := managementClusterClient.Get(ctx, key, azCluster)
return azCluster, err
}

func getAzureManagedControlPlane(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*expv1alpha4.AzureManagedControlPlane, error) {
func getAzureManagedControlPlane(ctx context.Context, managementClusterClient client.Client, namespace, name string) (*infrav1exp.AzureManagedControlPlane, error) {
key := client.ObjectKey{
Namespace: namespace,
Name: name,
}

azManagedControlPlane := &expv1alpha4.AzureManagedControlPlane{}
azManagedControlPlane := &infrav1exp.AzureManagedControlPlane{}
err := managementClusterClient.Get(ctx, key, azManagedControlPlane)
return azManagedControlPlane, err
}

func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*v1beta1.AzureMachine, error) {
func getAzureMachine(ctx context.Context, managementClusterClient client.Client, m *clusterv1.Machine) (*infrav1.AzureMachine, error) {
key := client.ObjectKey{
Namespace: m.Spec.InfrastructureRef.Namespace,
Name: m.Spec.InfrastructureRef.Name,
}

azMachine := &v1beta1.AzureMachine{}
azMachine := &infrav1.AzureMachine{}
err := managementClusterClient.Get(ctx, key, azMachine)
return azMachine, err
}

func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*expv1alpha4.AzureMachinePool, error) {
func getAzureMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}

azMachinePool := &expv1alpha4.AzureMachinePool{}
azMachinePool := &infrav1exp.AzureMachinePool{}
err := managementClusterClient.Get(ctx, key, azMachinePool)
return azMachinePool, err
}

func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*expv1alpha4.AzureManagedMachinePool, error) {
func getAzureManagedMachinePool(ctx context.Context, managementClusterClient client.Client, mp *expv1.MachinePool) (*infrav1exp.AzureManagedMachinePool, error) {
key := client.ObjectKey{
Namespace: mp.Spec.Template.Spec.InfrastructureRef.Namespace,
Name: mp.Spec.Template.Spec.InfrastructureRef.Name,
}

azManagedMachinePool := &expv1alpha4.AzureManagedMachinePool{}
azManagedMachinePool := &infrav1exp.AzureManagedMachinePool{}
err := managementClusterClient.Get(ctx, key, azManagedMachinePool)
return azManagedMachinePool, err
}
@@ -388,7 +385,7 @@ func windowsCrashDumpLogs(execToPathFn func(outputFileName string, command strin
}

// collectVMBootLog collects boot logs of the vm by using azure boot diagnostics.
func collectVMBootLog(ctx context.Context, am *v1beta1.AzureMachine, outputPath string) error {
func collectVMBootLog(ctx context.Context, am *infrav1.AzureMachine, outputPath string) error {
Logf("Collecting boot logs for AzureMachine %s\n", am.GetName())

if am == nil || am.Spec.ProviderID == nil {
17 changes: 7 additions & 10 deletions test/e2e/azure_machinepool_drain.go
@@ -33,13 +33,12 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"sigs.k8s.io/cluster-api-provider-azure/azure"
"sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
infrav1exp "sigs.k8s.io/cluster-api-provider-azure/exp/api/v1beta1"
deployments "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/node"
"sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/windows"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
clusterv1exp "sigs.k8s.io/cluster-api/exp/api/v1beta1"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
@@ -87,19 +86,18 @@ func AzureMachinePoolDrainSpec(ctx context.Context, inputGetter func() AzureMach
Expect(clientset).NotTo(BeNil())

By(fmt.Sprintf("listing AzureMachinePools in the cluster in namespace %s", input.Namespace.Name))
ampList := &v1beta1.AzureMachinePoolList{}
ampList := &infrav1exp.AzureMachinePoolList{}
Expect(bootstrapClusterProxy.GetClient().List(ctx, ampList, client.InNamespace(input.Namespace.Name), client.MatchingLabels(labels))).To(Succeed())
for _, amp := range ampList.Items {
testMachinePoolCordonAndDrain(ctx, bootstrapClusterProxy, workloadClusterProxy, amp)
}

}

func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, workloadClusterProxy framework.ClusterProxy, amp v1beta1.AzureMachinePool) {
func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, workloadClusterProxy framework.ClusterProxy, amp infrav1exp.AzureMachinePool) {
var (
isWindows = amp.Spec.Template.OSDisk.OSType == azure.WindowsOS
clientset = workloadClusterProxy.GetClientSet()
owningMachinePool = func() *clusterv1exp.MachinePool {
owningMachinePool = func() *expv1.MachinePool {
mp, err := getOwnerMachinePool(ctx, mgmtClusterProxy.GetClient(), amp.ObjectMeta)
Expect(err).NotTo(HaveOccurred())
return mp
@@ -169,7 +167,6 @@ func testMachinePoolCordonAndDrain(ctx context.Context, mgmtClusterProxy, worklo

// TODO setup a watcher to validate expected 2nd order drain outcomes
// https://github.com/kubernetes-sigs/cluster-api-provider-azure/issues/2159

}

func labelNodesWithMachinePoolName(ctx context.Context, workloadClient client.Client, mpName string, ampms []infrav1exp.AzureMachinePoolMachine) {
@@ -211,15 +208,15 @@ func getAzureMachinePoolMachines(ctx context.Context, mgmtClusterProxy, workload
}

// getOwnerMachinePool returns the name of MachinePool object owning the current resource.
func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1exp.MachinePool, error) {
func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expv1.MachinePool, error) {
for _, ref := range obj.OwnerReferences {
gv, err := schema.ParseGroupVersion(ref.APIVersion)
if err != nil {
return nil, err
}

if ref.Kind == "MachinePool" && gv.Group == clusterv1exp.GroupVersion.Group {
mp := &clusterv1exp.MachinePool{}
if ref.Kind == "MachinePool" && gv.Group == expv1.GroupVersion.Group {
mp := &expv1.MachinePool{}
Eventually(func() error {
err := c.Get(ctx, client.ObjectKey{
Name: ref.Name,
9 changes: 3 additions & 6 deletions test/e2e/azure_net_pol.go
@@ -29,14 +29,12 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
e2e_networkpolicy "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/networkpolicy"

corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
deploymentBuilder "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/deployment"
e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
e2e_networkpolicy "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/networkpolicy"
"sigs.k8s.io/cluster-api/test/framework"
)

@@ -264,5 +262,4 @@ func AzureNetPolSpec(ctx context.Context, inputGetter func() AzureNetPolSpecInpu

By("Ensuring we have ingress access from role:frontend pods in development namespace")
e2e_networkpolicy.EnsureConnectivityResultBetweenPods(clientset, config, frontendDevPods, backendPods, true)

}
3 changes: 1 addition & 2 deletions test/e2e/azure_selfhosted.go
@@ -27,8 +27,6 @@ import (

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"sigs.k8s.io/controller-runtime/pkg/client"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
@@ -38,6 +36,7 @@ import (
"sigs.k8s.io/cluster-api/test/framework"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// SelfHostedSpecInput is the input for SelfHostedSpec.
5 changes: 2 additions & 3 deletions test/e2e/azure_test.go
@@ -26,18 +26,17 @@ import (
"path/filepath"
"time"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"sigs.k8s.io/cluster-api/util"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
"sigs.k8s.io/cluster-api/test/framework/clusterctl"
"sigs.k8s.io/cluster-api/util"
)

var _ = Describe("Workload cluster creation", func() {
1 change: 0 additions & 1 deletion test/e2e/common.go
@@ -249,7 +249,6 @@ func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCl
controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
InstallAzureDiskCSIDriverHelmChart(ctx, input)
result.ControlPlane = controlPlane

}

func discoveryAndWaitForControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) *kubeadmv1.KubeadmControlPlane {