diff --git a/test/integration/datapath/datapath_linux_test.go b/test/integration/datapath/datapath_linux_test.go index 22fbd16602..c4b40f553e 100644 --- a/test/integration/datapath/datapath_linux_test.go +++ b/test/integration/datapath/datapath_linux_test.go @@ -12,7 +12,7 @@ import ( k8s "github.com/Azure/azure-container-networking/test/integration" "github.com/Azure/azure-container-networking/test/integration/goldpinger" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" "github.com/pkg/errors" @@ -74,17 +74,17 @@ func setupLinuxEnvironment(t *testing.T) { ctx := context.Background() t.Log("Create Clientset") - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatalf("could not get k8s clientset: %v", err) } t.Log("Create Label Selectors") - podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) - nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector) + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") - nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) if err != nil { t.Fatalf("could not get k8s node list: %v", err) } @@ -96,29 +96,29 @@ func setupLinuxEnvironment(t *testing.T) { var deployment appsv1.Deployment if *isDualStack { - deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPv6) + deployment, err = kubernetes.MustParseDeployment(LinuxDeployIPv6) if err != nil { t.Fatal(err) } - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonsetIPv6) + daemonset, err = kubernetes.MustParseDaemonSet(gpDaemonsetIPv6) if err != nil { t.Fatal(err) } } else { - deployment, err = k8sutils.MustParseDeployment(LinuxDeployIPV4) + deployment, err = kubernetes.MustParseDeployment(LinuxDeployIPV4) if err != nil { t.Fatal(err) } - daemonset, err = k8sutils.MustParseDaemonSet(gpDaemonset) + daemonset, err = kubernetes.MustParseDaemonSet(gpDaemonset) if err != nil { t.Fatal(err) } } // setup common RBAC, ClusteerRole, ClusterRoleBinding, ServiceAccount - rbacSetupFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + rbacSetupFn, err := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) if err != nil { t.Log(os.Getwd()) t.Fatal(err) @@ -134,13 +134,13 @@ func setupLinuxEnvironment(t *testing.T) { daemonset.Namespace = *podNamespace deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + err = kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) if err != nil { t.Fatal(err) } daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) - err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + err = kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset) if err != nil { t.Fatal(err) } @@ -159,7 +159,7 @@ func setupLinuxEnvironment(t *testing.T) { }) t.Log("Waiting for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + err = 
kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { t.Fatalf("Pods are not in running state due to %+v", err) } @@ -172,7 +172,7 @@ func setupLinuxEnvironment(t *testing.T) { t.Log("Checking Linux test environment") for _, node := range nodes.Items { - pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) + pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) if err != nil { t.Fatalf("could not get k8s clientset: %v", err) } @@ -188,20 +188,20 @@ func TestDatapathLinux(t *testing.T) { ctx := context.Background() t.Log("Get REST config") - restConfig := k8sutils.MustGetRestConfig(t) + restConfig := kubernetes.MustGetRestConfig(t) t.Log("Create Clientset") - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatalf("could not get k8s clientset: %v", err) } setupLinuxEnvironment(t) - podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) t.Run("Linux ping tests", func(t *testing.T) { // Check goldpinger health t.Run("all pods have IPs assigned", func(t *testing.T) { - err := k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + err := kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { t.Fatalf("Pods are not in running state due to %+v", err) } diff --git a/test/integration/datapath/datapath_windows_test.go b/test/integration/datapath/datapath_windows_test.go index 3707203db4..8ce2fdf9b7 100644 --- a/test/integration/datapath/datapath_windows_test.go +++ b/test/integration/datapath/datapath_windows_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/Azure/azure-container-networking/test/internal/datapath" - "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/stretchr/testify/require" apiv1 "k8s.io/api/core/v1" ) @@ -49,23 +49,23 @@ func setupWindowsEnvironment(t *testing.T) { ctx := context.Background() t.Log("Create Clientset") - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } t.Log("Create Label Selectors") - podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) - nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector) + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") - nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) if err != nil { t.Fatal(err) } // Create namespace if it doesn't exist - namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, *podNamespace) + namespaceExists, err := kubernetes.NamespaceExists(ctx, clientset, *podNamespace) if err != nil { t.Fatalf("failed to check if namespace %s exists due to: %v", *podNamespace, err) } @@ -73,13 +73,13 @@ func setupWindowsEnvironment(t *testing.T) { if !namespaceExists { // Test Namespace t.Log("Create Namespace") - err := k8sutils.MustCreateNamespace(ctx, clientset, *podNamespace) + err := kubernetes.MustCreateNamespace(ctx, clientset, *podNamespace) if err != nil { t.Fatalf("failed to create pod 
namespace %s due to: %v", *podNamespace, err) } t.Log("Creating Windows pods through deployment") - deployment, err := k8sutils.MustParseDeployment(WindowsDeployYamlPath) + deployment, err := kubernetes.MustParseDeployment(WindowsDeployYamlPath) if err != nil { t.Fatal(err) } @@ -93,13 +93,13 @@ func setupWindowsEnvironment(t *testing.T) { deployment.Namespace = *podNamespace deploymentsClient := clientset.AppsV1().Deployments(*podNamespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + err = kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) if err != nil { t.Fatal(err) } t.Log("Waiting for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { t.Fatal(err) } @@ -109,7 +109,7 @@ func setupWindowsEnvironment(t *testing.T) { t.Log("Namespace already exists") t.Log("Checking for pods to be running state") - err = k8sutils.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) + err = kubernetes.WaitForPodsRunning(ctx, clientset, *podNamespace, podLabelSelector) if err != nil { t.Fatal(err) } @@ -118,7 +118,7 @@ func setupWindowsEnvironment(t *testing.T) { t.Log("Checking Windows test environment") for _, node := range nodes.Items { - pods, err := k8sutils.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) + pods, err := kubernetes.GetPodsByNode(ctx, clientset, *podNamespace, podLabelSelector, node.Name) if err != nil { t.Fatal(err) } @@ -133,20 +133,20 @@ func TestDatapathWin(t *testing.T) { ctx := context.Background() t.Log("Get REST config") - restConfig := k8sutils.MustGetRestConfig(t) + restConfig := kubernetes.MustGetRestConfig(t) t.Log("Create Clientset") - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatalf("could not get k8s clientset: %v", err) } setupWindowsEnvironment(t) - podLabelSelector := k8sutils.CreateLabelSelector(podLabelKey, podPrefix) - nodeLabelSelector := k8sutils.CreateLabelSelector(nodepoolKey, nodepoolSelector) + podLabelSelector := kubernetes.CreateLabelSelector(podLabelKey, podPrefix) + nodeLabelSelector := kubernetes.CreateLabelSelector(nodepoolKey, nodepoolSelector) t.Log("Get Nodes") - nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) + nodes, err := kubernetes.GetNodeListByLabelSelector(ctx, clientset, nodeLabelSelector) if err != nil { t.Fatal(err) } diff --git a/test/integration/k8s_test.go b/test/integration/k8s_test.go index e97995197e..74ebe3f27a 100644 --- a/test/integration/k8s_test.go +++ b/test/integration/k8s_test.go @@ -13,7 +13,7 @@ import ( "time" "github.com/Azure/azure-container-networking/test/integration/goldpinger" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/internal/retry" v1 "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -81,18 +81,18 @@ todo: */ func TestPodScaling(t *testing.T) { - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } - restConfig := k8sutils.MustGetRestConfig(t) - deployment, err := k8sutils.MustParseDeployment(gpDeployment) + restConfig := kubernetes.MustGetRestConfig(t) + deployment, err := kubernetes.MustParseDeployment(gpDeployment) if err != nil { 
t.Fatal(err) } - daemonset, err := k8sutils.MustParseDaemonSet(gpDaemonset) + daemonset, err := kubernetes.MustParseDaemonSet(gpDaemonset) if err != nil { t.Fatal(err) } @@ -100,25 +100,25 @@ func TestPodScaling(t *testing.T) { ctx := context.Background() if shouldLabelNodes() { - k8sutils.MustLabelSwiftNodes(ctx, t, clientset, *delegatedSubnetID, *delegatedSubnetName) + kubernetes.MustLabelSwiftNodes(ctx, t, clientset, *delegatedSubnetID, *delegatedSubnetName) } else { t.Log("swift node labels not passed or set. skipping labeling") } - rbacCleanUpFn, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) + rbacCleanUpFn, err := kubernetes.MustSetUpClusterRBAC(ctx, clientset, gpClusterRolePath, gpClusterRoleBindingPath, gpServiceAccountPath) if err != nil { t.Log(os.Getwd()) t.Fatal(err) } deploymentsClient := clientset.AppsV1().Deployments(deployment.Namespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + err = kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) if err != nil { t.Fatal(err) } daemonsetClient := clientset.AppsV1().DaemonSets(daemonset.Namespace) - err = k8sutils.MustCreateDaemonset(ctx, daemonsetClient, daemonset) + err = kubernetes.MustCreateDaemonset(ctx, daemonsetClient, daemonset) if err != nil { t.Fatal(err) } @@ -254,7 +254,7 @@ func updateReplicaCount(t *testing.T, ctx context.Context, deployments v1.Deploy } t.Logf("setting deployment %s to %d replicas", name, replicas) - res.Spec.Replicas = k8sutils.Int32ToPtr(int32(replicas)) + res.Spec.Replicas = kubernetes.Int32ToPtr(int32(replicas)) _, err = deployments.Update(ctx, res, metav1.UpdateOptions{}) return err }) diff --git a/test/integration/load/load_test.go b/test/integration/load/load_test.go index 1ec299895b..c66695a2a1 100644 --- a/test/integration/load/load_test.go +++ b/test/integration/load/load_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/Azure/azure-container-networking/test/validate" ) @@ -59,7 +59,7 @@ todo: consider adding the following scenarios - [x] Add deployment yaml for windows. 
*/ func TestLoad(t *testing.T) { - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } @@ -68,31 +68,31 @@ func TestLoad(t *testing.T) { defer cancel() // Create namespace if it doesn't exist - namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, namespace) + namespaceExists, err := kubernetes.NamespaceExists(ctx, clientset, namespace) if err != nil { t.Fatal(err) } if !namespaceExists { - err = k8sutils.MustCreateNamespace(ctx, clientset, namespace) + err = kubernetes.MustCreateNamespace(ctx, clientset, namespace) if err != nil { t.Fatal(err) } } - deployment, err := k8sutils.MustParseDeployment(noopDeploymentMap[*osType]) + deployment, err := kubernetes.MustParseDeployment(noopDeploymentMap[*osType]) if err != nil { t.Fatal(err) } deploymentsClient := clientset.AppsV1().Deployments(namespace) - err = k8sutils.MustCreateDeployment(ctx, deploymentsClient, deployment) + err = kubernetes.MustCreateDeployment(ctx, deploymentsClient, deployment) if err != nil { t.Fatal(err) } t.Log("Checking pods are running") - err = k8sutils.WaitForPodsRunning(ctx, clientset, namespace, podLabelSelector) + err = kubernetes.WaitForPodsRunning(ctx, clientset, namespace, podLabelSelector) if err != nil { t.Fatal(err) } @@ -101,18 +101,18 @@ func TestLoad(t *testing.T) { for i := 0; i < *iterations; i++ { t.Log("Iteration ", i) t.Log("Scale down deployment") - err = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleDownReplicas, *skipWait) + err = kubernetes.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleDownReplicas, *skipWait) if err != nil { t.Fatal(err) } t.Log("Scale up deployment") - err = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleUpReplicas, *skipWait) + err = kubernetes.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *scaleUpReplicas, *skipWait) if err != nil { t.Fatal(err) } } t.Log("Checking pods are running and IP assigned") - err = k8sutils.WaitForPodsRunning(ctx, clientset, "", "") + err = kubernetes.WaitForPodsRunning(ctx, clientset, "", "") if err != nil { t.Fatal(err) } @@ -128,11 +128,11 @@ func TestLoad(t *testing.T) { // TestValidateState validates the state file based on the os and cni type. 
func TestValidateState(t *testing.T) { - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } - config := k8sutils.MustGetRestConfig(t) + config := kubernetes.MustGetRestConfig(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) defer cancel() @@ -149,41 +149,41 @@ func TestValidateState(t *testing.T) { // go test -timeout 30m -tags load -run ^TestScaleDeployment$ -tags=load -replicas 10 func TestScaleDeployment(t *testing.T) { t.Log("Scale deployment") - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } ctx := context.Background() // Create namespace if it doesn't exist - namespaceExists, err := k8sutils.NamespaceExists(ctx, clientset, namespace) + namespaceExists, err := kubernetes.NamespaceExists(ctx, clientset, namespace) if err != nil { t.Fatal(err) } if !namespaceExists { - err = k8sutils.MustCreateNamespace(ctx, clientset, namespace) + err = kubernetes.MustCreateNamespace(ctx, clientset, namespace) if err != nil { t.Fatal(err) } } - deployment, err := k8sutils.MustParseDeployment(noopDeploymentMap[*osType]) + deployment, err := kubernetes.MustParseDeployment(noopDeploymentMap[*osType]) if err != nil { t.Fatal(err) } deploymentsClient := clientset.AppsV1().Deployments(namespace) - err = k8sutils.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *replicas, *skipWait) + err = kubernetes.MustScaleDeployment(ctx, deploymentsClient, deployment, clientset, namespace, podLabelSelector, *replicas, *skipWait) if err != nil { t.Fatal(err) } } func TestDualStackProperties(t *testing.T) { - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { t.Fatal(err) } - config := k8sutils.MustGetRestConfig(t) + config := kubernetes.MustGetRestConfig(t) ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) defer cancel() diff --git a/test/integration/setup_test.go b/test/integration/setup_test.go index bde690e2b7..4cdc7f54aa 100644 --- a/test/integration/setup_test.go +++ b/test/integration/setup_test.go @@ -10,39 +10,12 @@ import ( "strconv" "testing" - k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/kubernetes" + "github.com/Azure/azure-container-networking/test/internal/kubernetes" ) const ( exitFail = 1 - envTestDropgz = "TEST_DROPGZ" - envCNIDropgzVersion = "CNI_DROPGZ_VERSION" - envCNSVersion = "CNS_VERSION" - envInstallCNS = "INSTALL_CNS" - envInstallAzilium = "INSTALL_AZILIUM" - envInstallAzureVnet = "INSTALL_AZURE_VNET" - envInstallOverlay = "INSTALL_OVERLAY" - envInstallAzureCNIOverlay = "INSTALL_AZURE_CNI_OVERLAY" - envInstallDualStackOverlay = "INSTALL_DUALSTACK_OVERLAY" - - // relative cns manifest paths - cnsManifestFolder = "manifests/cns" - cnsConfigFolder = "manifests/cnsconfig" - cnsDaemonSetPath = cnsManifestFolder + "/daemonset.yaml" - cnsClusterRolePath = cnsManifestFolder + "/clusterrole.yaml" - cnsClusterRoleBindingPath = cnsManifestFolder + "/clusterrolebinding.yaml" - cnsSwiftConfigMapPath = cnsConfigFolder + "/swiftconfigmap.yaml" - cnsCiliumConfigMapPath = cnsConfigFolder + "/ciliumconfigmap.yaml" - cnsOverlayConfigMapPath = cnsConfigFolder + "/overlayconfigmap.yaml" - cnsAzureCNIOverlayConfigMapPath = cnsConfigFolder + "/azurecnioverlayconfigmap.yaml" - cnsRolePath = cnsManifestFolder + "/role.yaml" - 
cnsRoleBindingPath = cnsManifestFolder + "/rolebinding.yaml" - cnsServiceAccountPath = cnsManifestFolder + "/serviceaccount.yaml" - cnsLabelSelector = "k8s-app=azure-cns" - // relative log directory logDir = "logs/" ) @@ -76,282 +49,23 @@ func TestMain(m *testing.M) { os.Exit(exitCode) }() - clientset, err := k8sutils.MustGetClientset() + clientset, err := kubernetes.MustGetClientset() if err != nil { return } ctx := context.Background() - if installopt := os.Getenv(envInstallCNS); installopt != "" { - // create dirty cns ds - if installCNS, err := strconv.ParseBool(installopt); err == nil && installCNS == true { - if cnscleanup, err = installCNSDaemonset(ctx, clientset, logDir); err != nil { - log.Print(err) - exitCode = 2 - return - } + installopt := os.Getenv(kubernetes.EnvInstallCNS) + // create dirty cns ds + if installCNS, err := strconv.ParseBool(installopt); err == nil && installCNS == true { + if cnscleanup, err = kubernetes.InstallCNSDaemonset(ctx, clientset, logDir); err != nil { + log.Print(err) + exitCode = 2 + return } } else { - log.Printf("Env %v not set to true, skipping", envInstallCNS) + log.Printf("Env %v not set to true, skipping", kubernetes.EnvInstallCNS) } exitCode = m.Run() } - -func installCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, logDir string) (func() error, error) { - cniDropgzVersion := os.Getenv(envCNIDropgzVersion) - cnsVersion := os.Getenv(envCNSVersion) - - // setup daemonset - cns, err := k8sutils.MustParseDaemonSet(cnsDaemonSetPath) - if err != nil { - return nil, err - } - - image, _ := k8sutils.ParseImageString(cns.Spec.Template.Spec.Containers[0].Image) - cns.Spec.Template.Spec.Containers[0].Image = k8sutils.GetImageString(image, cnsVersion) - - // check environment scenario - log.Printf("Checking environment scenario") - if installBoolDropgz := os.Getenv(envTestDropgz); installBoolDropgz != "" { - if testDropgzScenario, err := strconv.ParseBool(installBoolDropgz); err == nil && testDropgzScenario == true { - log.Printf("Env %v set to true, deploy cniTest.Dockerfile", envTestDropgz) - initImage, _ := k8sutils.ParseImageString("acnpublic.azurecr.io/cni-dropgz-test:latest") - cns.Spec.Template.Spec.InitContainers[0].Image = k8sutils.GetImageString(initImage, cniDropgzVersion) - } - } else { - log.Printf("Env %v not set to true, deploying cni.Dockerfile", envTestDropgz) - initImage, _ := k8sutils.ParseImageString(cns.Spec.Template.Spec.InitContainers[0].Image) - cns.Spec.Template.Spec.InitContainers[0].Image = k8sutils.GetImageString(initImage, cniDropgzVersion) - } - - if installBool1 := os.Getenv(envInstallAzureVnet); installBool1 != "" { - if azureVnetScenario, err := strconv.ParseBool(installBool1); err == nil && azureVnetScenario == true { - log.Printf("Env %v set to true, deploy azure-vnet", envInstallAzureVnet) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", "/opt/cni/bin/azure-vnet-ipam", "azure-swift.conflist", "-o", "/etc/cni/net.d/10-azure.conflist"} - } - // setup the CNS swiftconfigmap - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsSwiftConfigMapPath); err != nil { - return nil, err - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallAzureVnet) - } - - if installBool2 := os.Getenv(envInstallAzilium); installBool2 != "" { - if aziliumScenario, err := strconv.ParseBool(installBool2); err == nil && aziliumScenario == true { - 
log.Printf("Env %v set to true, deploy azure-ipam and cilium-cni", envInstallAzilium) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-ipam", "-o", "/opt/cni/bin/azure-ipam"} - } - // setup the CNS ciliumconfigmap - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsCiliumConfigMapPath); err != nil { - return nil, err - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallAzilium) - } - - if installBool3 := os.Getenv(envInstallOverlay); installBool3 != "" { - if overlayScenario, err := strconv.ParseBool(installBool3); err == nil && overlayScenario == true { - log.Printf("Env %v set to true, deploy azure-ipam and cilium-cni", envInstallOverlay) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-ipam", "-o", "/opt/cni/bin/azure-ipam"} - } - // setup the CNS ciliumconfigmap - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsOverlayConfigMapPath); err != nil { - return nil, err - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallOverlay) - } - - if installBool4 := os.Getenv(envInstallAzureCNIOverlay); installBool4 != "" { - if overlayScenario, err := strconv.ParseBool(installBool4); err == nil && overlayScenario { - log.Printf("Env %v set to true, deploy azure-cni and azure-cns", envInstallAzureCNIOverlay) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry"} - - // override the volumes and volume mounts - cns.Spec.Template.Spec.Volumes = volumesForAzureCNIOverlay() - cns.Spec.Template.Spec.InitContainers[0].VolumeMounts = dropgzVolumeMountsForAzureCNIOverlay() - cns.Spec.Template.Spec.Containers[0].VolumeMounts = cnsVolumeMountsForAzureCNIOverlay() - - // set up the CNS conifgmap for azure cni overlay - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsAzureCNIOverlayConfigMapPath); err != nil { - return nil, err - } - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallAzureCNIOverlay) - } - - if installBool5 := os.Getenv(envInstallDualStackOverlay); installBool5 != "" { - if dualStackOverlayScenario, err := strconv.ParseBool(installBool5); err == nil && dualStackOverlayScenario == true { - log.Printf("Env %v set to true, deploy azure-vnet", envInstallDualStackOverlay) - cns.Spec.Template.Spec.InitContainers[0].Args = []string{"deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", "/opt/cni/bin/azure-vnet-ipam", "azure-swift-overlay-dualstack.conflist", "-o", "/etc/cni/net.d/10-azure.conflist"} - } - // setup the CNS swiftconfigmap - if err := k8sutils.MustSetupConfigMap(ctx, clientset, cnsSwiftConfigMapPath); err != nil { - return nil, err - } - } else { - log.Printf("Env %v not set to true, skipping", envInstallDualStackOverlay) - } - - cnsDaemonsetClient := clientset.AppsV1().DaemonSets(cns.Namespace) - - log.Printf("Installing CNS with image %s", cns.Spec.Template.Spec.Containers[0].Image) - - // setup common RBAC, ClusteerRole, ClusterRoleBinding, ServiceAccount - if _, err := k8sutils.MustSetUpClusterRBAC(ctx, clientset, cnsClusterRolePath, cnsClusterRoleBindingPath, cnsServiceAccountPath); err != nil { - return nil, err - } - - // setup RBAC, Role, RoleBinding - if err := k8sutils.MustSetUpRBAC(ctx, clientset, cnsRolePath, cnsRoleBindingPath); err != nil { - return nil, err - } - - if err = k8sutils.MustCreateDaemonset(ctx, 
cnsDaemonsetClient, cns); err != nil { - return nil, err - } - - if err = k8sutils.WaitForPodsRunning(ctx, clientset, cns.Namespace, cnsLabelSelector); err != nil { - return nil, err - } - - cleanupds := func() error { - if err := k8sutils.ExportLogsByLabelSelector(ctx, clientset, cns.Namespace, cnsLabelSelector, logDir); err != nil { - return err - } - return nil - } - - return cleanupds, nil -} - -func hostPathTypePtr(h corev1.HostPathType) *corev1.HostPathType { - return &h -} - -func volumesForAzureCNIOverlay() []corev1.Volume { - return []corev1.Volume{ - { - Name: "log", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/log/azure-cns", - Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "cns-state", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/lib/azure-network", - Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "cni-bin", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/opt/cni/bin", - Type: hostPathTypePtr(corev1.HostPathDirectory), - }, - }, - }, - { - Name: "azure-vnet", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/run/azure-vnet", - Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "cni-lock", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/lock/azure-vnet", - Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), - }, - }, - }, - { - Name: "legacy-cni-state", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/run/azure-vnet.json", - Type: hostPathTypePtr(corev1.HostPathFileOrCreate), - }, - }, - }, - { - Name: "cni-conflist", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/etc/cni/net.d", - Type: hostPathTypePtr(corev1.HostPathDirectory), - }, - }, - }, - { - Name: "cns-config", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cns-config", - }, - }, - }, - }, - } -} - -func dropgzVolumeMountsForAzureCNIOverlay() []corev1.VolumeMount { - return []corev1.VolumeMount{ - { - Name: "cni-bin", - MountPath: "/opt/cni/bin", - }, - } -} - -func cnsVolumeMountsForAzureCNIOverlay() []corev1.VolumeMount { - return []corev1.VolumeMount{ - { - Name: "log", - MountPath: "/var/log", - }, - { - Name: "cns-state", - MountPath: "/var/lib/azure-network", - }, - { - Name: "cns-config", - MountPath: "/etc/azure-cns", - }, - { - Name: "cni-bin", - MountPath: "/opt/cni/bin", - }, - { - Name: "azure-vnet", - MountPath: "/var/run/azure-vnet", - }, - { - Name: "cni-lock", - MountPath: "/var/lock/azure-vnet", - }, - { - Name: "legacy-cni-state", - MountPath: "/var/run/azure-vnet.json", - }, - { - Name: "cni-conflist", - MountPath: "/etc/cni/net.d", - }, - } -} diff --git a/test/internal/datapath/datapath_win.go b/test/internal/datapath/datapath_win.go index d59bb53f69..433184d53c 100644 --- a/test/internal/datapath/datapath_win.go +++ b/test/internal/datapath/datapath_win.go @@ -6,7 +6,7 @@ import ( "net" "strings" - "github.com/Azure/azure-container-networking/test/internal/k8sutils" + acnk8s "github.com/Azure/azure-container-networking/test/internal/kubernetes" "github.com/pkg/errors" "github.com/sirupsen/logrus" apiv1 "k8s.io/api/core/v1" @@ -19,7 +19,7 @@ var ipv6PrefixPolicy = []string{"curl", "-6", 
"-I", "-v", "www.bing.com"} func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1.Pod, cmd []string, rc *restclient.Config, passFunc func(string) error) error { logrus.Infof("podTest() - %v %v", srcPod.Name, cmd) - output, err := k8sutils.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc) + output, err := acnk8s.ExecCmdOnPod(ctx, clientset, srcPod.Namespace, srcPod.Name, cmd, rc) if err != nil { return errors.Wrapf(err, "failed to execute command on pod: %v", srcPod.Name) } @@ -28,7 +28,7 @@ func podTest(ctx context.Context, clientset *kubernetes.Clientset, srcPod *apiv1 func WindowsPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { logrus.Infof("Get Pods for Node: %s", nodeName) - pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) + pods, err := acnk8s.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) if err != nil { logrus.Error(err) return errors.Wrap(err, "k8s api call") @@ -77,7 +77,7 @@ func WindowsPodToPodPingTestSameNode(ctx context.Context, clientset *kubernetes. func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName1, nodeName2, podNamespace, labelSelector string, rc *restclient.Config) error { logrus.Infof("Get Pods for Node 1: %s", nodeName1) // Node 1 - pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName1) + pods, err := acnk8s.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName1) if err != nil { logrus.Error(err) return errors.Wrap(err, "k8s api call") @@ -90,7 +90,7 @@ func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes. logrus.Infof("Get Pods for Node 2: %s", nodeName2) // Node 2 - pods, err = k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName2) + pods, err = acnk8s.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName2) if err != nil { logrus.Error(err) return errors.Wrap(err, "k8s api call") @@ -123,7 +123,7 @@ func WindowsPodToPodPingTestDiffNode(ctx context.Context, clientset *kubernetes. 
func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, nodeName, nodeIP, podNamespace, labelSelector string, rc *restclient.Config) error { logrus.Infof("Get Pods by Node: %s %s", nodeName, nodeIP) - pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) + pods, err := acnk8s.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) if err != nil { logrus.Error(err) return errors.Wrap(err, "k8s api call") @@ -162,7 +162,7 @@ func WindowsPodToNode(ctx context.Context, clientset *kubernetes.Clientset, node func WindowsPodToInternet(ctx context.Context, clientset *kubernetes.Clientset, nodeName, podNamespace, labelSelector string, rc *restclient.Config) error { logrus.Infof("Get Pods by Node: %s", nodeName) - pods, err := k8sutils.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) + pods, err := acnk8s.GetPodsByNode(ctx, clientset, podNamespace, labelSelector, nodeName) if err != nil { logrus.Error(err) return errors.Wrap(err, "k8s api call") diff --git a/test/internal/k8sutils/utils_create.go b/test/internal/k8sutils/utils_create.go deleted file mode 100644 index 8d21af1035..0000000000 --- a/test/internal/k8sutils/utils_create.go +++ /dev/null @@ -1,176 +0,0 @@ -package k8sutils - -import ( - "context" - "log" - - "github.com/pkg/errors" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" -) - -func MustCreateOrUpdatePod(ctx context.Context, podI typedcorev1.PodInterface, pod corev1.Pod) error { - if err := MustDeletePod(ctx, podI, pod); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - if _, err := podI.Create(ctx, &pod, metav1.CreateOptions{}); err != nil { - return errors.Wrapf(err, "failed to create pod %v", pod.Name) - } - - return nil -} - -func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { - if err := mustDeleteDaemonset(ctx, daemonsets, ds); err != nil { - return err - } - log.Printf("Creating Daemonset %v", ds.Name) - if _, err := daemonsets.Create(ctx, &ds, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func MustCreateDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { - if err := mustDeleteDeployment(ctx, deployments, d); err != nil { - return err - } - log.Printf("Creating Deployment %v", d.Name) - if _, err := deployments.Create(ctx, &d, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateServiceAccount(ctx context.Context, svcAccounts typedcorev1.ServiceAccountInterface, s corev1.ServiceAccount) error { - if err := svcAccounts.Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - log.Printf("Creating ServiceAccount %v", s.Name) - if _, err := svcAccounts.Create(ctx, &s, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateClusterRole(ctx context.Context, clusterRoles typedrbacv1.ClusterRoleInterface, cr rbacv1.ClusterRole) error { - if err := clusterRoles.Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return 
err - } - } - log.Printf("Creating ClusterRoles %v", cr.Name) - if _, err := clusterRoles.Create(ctx, &cr, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateClusterRoleBinding(ctx context.Context, crBindings typedrbacv1.ClusterRoleBindingInterface, crb rbacv1.ClusterRoleBinding) error { - if err := crBindings.Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - log.Printf("Creating RoleBinding %v", crb.Name) - if _, err := crBindings.Create(ctx, &crb, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateRole(ctx context.Context, rs typedrbacv1.RoleInterface, r rbacv1.Role) error { - if err := rs.Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - log.Printf("Creating Role %v", r.Name) - if _, err := rs.Create(ctx, &r, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateRoleBinding(ctx context.Context, rbi typedrbacv1.RoleBindingInterface, rb rbacv1.RoleBinding) error { - if err := rbi.Delete(ctx, rb.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - log.Printf("Creating RoleBinding %v", rb.Name) - if _, err := rbi.Create(ctx, &rb, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func mustCreateConfigMap(ctx context.Context, cmi typedcorev1.ConfigMapInterface, cm corev1.ConfigMap) error { - if err := cmi.Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil { - if !apierrors.IsNotFound(err) { - return err - } - } - log.Printf("Creating ConfigMap %v", cm.Name) - if _, err := cmi.Create(ctx, &cm, metav1.CreateOptions{}); err != nil { - return err - } - - return nil -} - -func MustScaleDeployment(ctx context.Context, - deploymentsClient typedappsv1.DeploymentInterface, - deployment appsv1.Deployment, - clientset *kubernetes.Clientset, - namespace, - podLabelSelector string, - replicas int, - skipWait bool, -) error { - log.Printf("Scaling deployment %v to %v replicas", deployment.Name, replicas) - err := MustUpdateReplica(ctx, deploymentsClient, deployment.Name, int32(replicas)) - if err != nil { - return err - } - - if !skipWait { - log.Printf("Waiting for pods to be ready..") - err = WaitForPodDeployment(ctx, clientset, namespace, deployment.Name, podLabelSelector, replicas) - if err != nil { - return err - } - } - return nil -} - -func MustCreateNamespace(ctx context.Context, clienset *kubernetes.Clientset, namespace string) error { - _, err := clienset.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - }, metav1.CreateOptions{}) - - if err != nil { - return errors.Wrapf(err, "failed to create namespace %v", namespace) - } - return nil -} diff --git a/test/internal/k8sutils/label.go b/test/internal/kubernetes/label.go similarity index 69% rename from test/internal/k8sutils/label.go rename to test/internal/kubernetes/label.go index 51079c43a0..6c2555e537 100644 --- a/test/internal/k8sutils/label.go +++ b/test/internal/kubernetes/label.go @@ -1,9 +1,10 @@ -package k8sutils +package kubernetes import ( "context" "encoding/json" + "github.com/pkg/errors" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -24,8 +25,13 @@ func AddNodeLabels(ctx context.Context, nodes corev1.NodeInterface, nodeName str bs, err := json.Marshal(mergeData) if err != nil { - return 
nil, err + return nil, errors.Wrap(err, "failed to marshal labels") } - return nodes.Patch(ctx, nodeName, types.MergePatchType, bs, metav1.PatchOptions{}) + node, err := nodes.Patch(ctx, nodeName, types.MergePatchType, bs, metav1.PatchOptions{}) + if err != nil { + return nil, errors.Wrap(err, "failed to patch node") + } + + return node, nil } diff --git a/test/internal/k8sutils/utils.go b/test/internal/kubernetes/utils.go similarity index 89% rename from test/internal/k8sutils/utils.go rename to test/internal/kubernetes/utils.go index 560695bfb7..57571e11e7 100644 --- a/test/internal/k8sutils/utils.go +++ b/test/internal/kubernetes/utils.go @@ -1,4 +1,4 @@ -package k8sutils +package kubernetes import ( "bytes" @@ -13,8 +13,6 @@ import ( "testing" "time" - // crd "dnc/requestcontroller/kubernetes" - "github.com/Azure/azure-container-networking/test/internal/retry" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -45,11 +43,11 @@ var Kubeconfig = flag.String("test-kubeconfig", filepath.Join(homedir.HomeDir(), func MustGetClientset() (*kubernetes.Clientset, error) { config, err := clientcmd.BuildConfigFromFlags("", *Kubeconfig) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to build config from flags") } clientset, err := kubernetes.NewForConfig(config) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to get clientset") } return clientset, nil } @@ -65,14 +63,14 @@ func MustGetRestConfig(t *testing.T) *rest.Config { func mustParseResource(path string, out interface{}) error { f, err := os.Open(path) if err != nil { - return err + return errors.Wrap(err, "failed to open path") } defer func() { _ = f.Close() }() if err := yaml.NewYAMLOrJSONDecoder(f, 0).Decode(out); err != nil { - return err + return errors.Wrap(err, "failed to decode") } - return err + return nil } func MustLabelSwiftNodes(ctx context.Context, t *testing.T, clientset *kubernetes.Clientset, delegatedSubnetID, delegatedSubnetName string) { @@ -85,7 +83,8 @@ func MustLabelSwiftNodes(ctx context.Context, t *testing.T, clientset *kubernete if err != nil { t.Fatalf("could not list nodes: %v", err) } - for _, node := range res.Items { + for index := range res.Items { + node := res.Items[index] _, err := AddNodeLabels(ctx, clientset.CoreV1().Nodes(), node.Name, swiftNodeLabels) if err != nil { t.Fatalf("could not add labels to node: %v", err) @@ -134,15 +133,15 @@ func MustSetUpClusterRBAC(ctx context.Context, clientset *kubernetes.Clientset, log.Print("rbac cleaned up") } - if err = mustCreateServiceAccount(ctx, serviceAccounts, serviceAccount); err != nil { + if err := mustCreateServiceAccount(ctx, serviceAccounts, serviceAccount); err != nil { return cleanupFunc, err } - if err = mustCreateClusterRole(ctx, clusterRoles, clusterRole); err != nil { + if err := mustCreateClusterRole(ctx, clusterRoles, clusterRole); err != nil { return cleanupFunc, err } - if err = mustCreateClusterRoleBinding(ctx, clusterRoleBindings, clusterRoleBinding); err != nil { + if err := mustCreateClusterRoleBinding(ctx, clusterRoleBindings, clusterRoleBinding); err != nil { return cleanupFunc, err } @@ -167,15 +166,11 @@ func MustSetUpRBAC(ctx context.Context, clientset *kubernetes.Clientset, rolePat roles := clientset.RbacV1().Roles(role.Namespace) roleBindings := clientset.RbacV1().RoleBindings(roleBinding.Namespace) - if err = mustCreateRole(ctx, roles, role); err != nil { + if err := mustCreateRole(ctx, roles, role); err != nil { return err } - if err = mustCreateRoleBinding(ctx, roleBindings, 
roleBinding); err != nil { - return err - } - - return nil + return mustCreateRoleBinding(ctx, roleBindings, roleBinding) } func MustSetupConfigMap(ctx context.Context, clientset *kubernetes.Clientset, configMapPath string) error { @@ -217,13 +212,15 @@ func WaitForPodsRunning(ctx context.Context, clientset *kubernetes.Clientset, na return errors.New("no pods scheduled") } - for _, pod := range podList.Items { + for index := range podList.Items { + pod := podList.Items[index] if pod.Status.Phase == corev1.PodPending { return errors.New("some pods still pending") } } - for _, pod := range podList.Items { + for index := range podList.Items { + pod := podList.Items[index] if pod.Status.PodIP == "" { return errors.Wrapf(err, "Pod %s/%s has not been allocated an IP yet with reason %s", pod.Namespace, pod.Name, pod.Status.Message) } @@ -233,7 +230,7 @@ func WaitForPodsRunning(ctx context.Context, clientset *kubernetes.Clientset, na } retrier := retry.Retrier{Attempts: RetryAttempts, Delay: RetryDelay} - return retrier.Do(ctx, checkPodIPsFn) + return errors.Wrap(retrier.Do(ctx, checkPodIPsFn), "failed to check if pods were running") } func WaitForPodDeployment(ctx context.Context, clientset *kubernetes.Clientset, namespace, deploymentName, podLabelSelector string, replicas int) error { @@ -321,21 +318,22 @@ func ExportLogsByLabelSelector(ctx context.Context, clientset *kubernetes.Client logExtension := ".log" podList, err := podsClient.List(ctx, metav1.ListOptions{LabelSelector: labelselector}) if err != nil { - return err + return errors.Wrap(err, "failed to list pods") } - for _, pod := range podList.Items { + for index := range podList.Items { + pod := podList.Items[index] req := podsClient.GetLogs(pod.Name, &podLogOpts) podLogs, err := req.Stream(ctx) if err != nil { - return err + return errors.Wrap(err, "failed to get pod logs as stream") } - defer podLogs.Close() buf := new(bytes.Buffer) _, err = io.Copy(buf, podLogs) + podLogs.Close() if err != nil { - return err + return errors.Wrap(err, "failed to copy pod logs") } str := buf.String() err = writeToFile(logDir, pod.Name+logExtension, str) @@ -349,23 +347,25 @@ func ExportLogsByLabelSelector(ctx context.Context, clientset *kubernetes.Client func writeToFile(dir, fileName, str string) error { if _, err := os.Stat(dir); os.IsNotExist(err) { // your dir does not exist - os.MkdirAll(dir, 0o666) + if err := os.MkdirAll(dir, 0o666); err != nil { //nolint + return errors.Wrap(err, "failed to make directory") + } } // open output file f, err := os.Create(dir + fileName) if err != nil { - return err + return errors.Wrap(err, "failed to create output file") } // close fo on exit and check for its returned error defer func() { - if err := f.Close(); err != nil { - panic(err) + if closeErr := f.Close(); closeErr != nil { + panic(closeErr) } }() // If write went ok then err is nil _, err = f.WriteString(str) - return err + return errors.Wrap(err, "failed to write string") } func ExecCmdOnPod(ctx context.Context, clientset *kubernetes.Clientset, namespace, podName string, cmd []string, config *rest.Config) ([]byte, error) { diff --git a/test/internal/kubernetes/utils_create.go b/test/internal/kubernetes/utils_create.go new file mode 100644 index 0000000000..988a7e58bc --- /dev/null +++ b/test/internal/kubernetes/utils_create.go @@ -0,0 +1,494 @@ +package kubernetes + +import ( + "context" + "fmt" + "log" + "os" + "strconv" + + "github.com/pkg/errors" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + typedappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + typedrbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" +) + +type cnsScenario struct { + initContainerArgs []string + volumes []corev1.Volume + initContainerVolumeMounts []corev1.VolumeMount + containerVolumeMounts []corev1.VolumeMount + configMapPath string +} + +const ( + envTestDropgz = "TEST_DROPGZ" + envCNIDropgzVersion = "CNI_DROPGZ_VERSION" + envCNSVersion = "CNS_VERSION" + EnvInstallCNS = "INSTALL_CNS" + envInstallAzilium = "INSTALL_AZILIUM" + envInstallAzureVnet = "INSTALL_AZURE_VNET" + envInstallOverlay = "INSTALL_OVERLAY" + envInstallAzureCNIOverlay = "INSTALL_AZURE_CNI_OVERLAY" + envInstallDualStackOverlay = "INSTALL_DUALSTACK_OVERLAY" + + // relative cns manifest paths + cnsManifestFolder = "manifests/cns" + cnsConfigFolder = "manifests/cnsconfig" + cnsDaemonSetPath = cnsManifestFolder + "/daemonset.yaml" + cnsClusterRolePath = cnsManifestFolder + "/clusterrole.yaml" + cnsClusterRoleBindingPath = cnsManifestFolder + "/clusterrolebinding.yaml" + cnsSwiftConfigMapPath = cnsConfigFolder + "/swiftconfigmap.yaml" + cnsCiliumConfigMapPath = cnsConfigFolder + "/ciliumconfigmap.yaml" + cnsOverlayConfigMapPath = cnsConfigFolder + "/overlayconfigmap.yaml" + cnsAzureCNIOverlayConfigMapPath = cnsConfigFolder + "/azurecnioverlayconfigmap.yaml" + cnsRolePath = cnsManifestFolder + "/role.yaml" + cnsRoleBindingPath = cnsManifestFolder + "/rolebinding.yaml" + cnsServiceAccountPath = cnsManifestFolder + "/serviceaccount.yaml" + cnsLabelSelector = "k8s-app=azure-cns" +) + +var ( + ErrUnsupportedCNSScenario = errors.New("Unsupported CNS scenario") + cnsScenarioMap = map[string]cnsScenario{ + envInstallAzureVnet: { + initContainerArgs: []string{ + "deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", + "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", "/opt/cni/bin/azure-vnet-ipam", + "azure-swift.conflist", "-o", "/etc/cni/net.d/10-azure.conflist", + }, + configMapPath: cnsSwiftConfigMapPath, + }, + envInstallAzilium: { + initContainerArgs: []string{ + "deploy", "azure-ipam", "-o", "/opt/cni/bin/azure-ipam", + }, + configMapPath: cnsCiliumConfigMapPath, + }, + envInstallOverlay: { + initContainerArgs: []string{"deploy", "azure-ipam", "-o", "/opt/cni/bin/azure-ipam"}, + configMapPath: cnsOverlayConfigMapPath, + }, + envInstallAzureCNIOverlay: { + initContainerArgs: []string{ + "deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", + }, + volumes: volumesForAzureCNIOverlay(), + initContainerVolumeMounts: dropgzVolumeMountsForAzureCNIOverlay(), + containerVolumeMounts: cnsVolumeMountsForAzureCNIOverlay(), + configMapPath: cnsAzureCNIOverlayConfigMapPath, + }, + envInstallDualStackOverlay: { + initContainerArgs: []string{ + "deploy", "azure-vnet", "-o", "/opt/cni/bin/azure-vnet", + "azure-vnet-telemetry", "-o", "/opt/cni/bin/azure-vnet-telemetry", "azure-vnet-ipam", "-o", + "/opt/cni/bin/azure-vnet-ipam", "azure-swift-overlay-dualstack.conflist", "-o", "/etc/cni/net.d/10-azure.conflist", + }, + configMapPath: cnsSwiftConfigMapPath, + }, + } +) + +func MustCreateOrUpdatePod(ctx context.Context, podI typedcorev1.PodInterface, pod corev1.Pod) error { + if err := MustDeletePod(ctx, podI, pod); err != nil { + if !apierrors.IsNotFound(err) { + return 
errors.Wrap(err, "failed to delete pod") + } + } + if _, err := podI.Create(ctx, &pod, metav1.CreateOptions{}); err != nil { + return errors.Wrapf(err, "failed to create pod %v", pod.Name) + } + + return nil +} + +func MustCreateDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error { + if err := mustDeleteDaemonset(ctx, daemonsets, ds); err != nil { + return errors.Wrap(err, "failed to delete daemonset") + } + log.Printf("Creating Daemonset %v", ds.Name) + if _, err := daemonsets.Create(ctx, &ds, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create daemonset") + } + + return nil +} + +func MustCreateDeployment(ctx context.Context, deployments typedappsv1.DeploymentInterface, d appsv1.Deployment) error { + if err := mustDeleteDeployment(ctx, deployments, d); err != nil { + return errors.Wrap(err, "failed to delete deployment") + } + log.Printf("Creating Deployment %v", d.Name) + if _, err := deployments.Create(ctx, &d, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create deployment") + } + + return nil +} + +func mustCreateServiceAccount(ctx context.Context, svcAccounts typedcorev1.ServiceAccountInterface, s corev1.ServiceAccount) error { + if err := svcAccounts.Delete(ctx, s.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete svc account") + } + } + log.Printf("Creating ServiceAccount %v", s.Name) + if _, err := svcAccounts.Create(ctx, &s, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create svc account") + } + + return nil +} + +func mustCreateClusterRole(ctx context.Context, clusterRoles typedrbacv1.ClusterRoleInterface, cr rbacv1.ClusterRole) error { + if err := clusterRoles.Delete(ctx, cr.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete cluster role") + } + } + log.Printf("Creating ClusterRoles %v", cr.Name) + if _, err := clusterRoles.Create(ctx, &cr, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create cluster role") + } + + return nil +} + +func mustCreateClusterRoleBinding(ctx context.Context, crBindings typedrbacv1.ClusterRoleBindingInterface, crb rbacv1.ClusterRoleBinding) error { + if err := crBindings.Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete cluster role binding") + } + } + log.Printf("Creating RoleBinding %v", crb.Name) + if _, err := crBindings.Create(ctx, &crb, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create role binding") + } + + return nil +} + +func mustCreateRole(ctx context.Context, rs typedrbacv1.RoleInterface, r rbacv1.Role) error { + if err := rs.Delete(ctx, r.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete role") + } + } + log.Printf("Creating Role %v", r.Name) + if _, err := rs.Create(ctx, &r, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create role") + } + + return nil +} + +func mustCreateRoleBinding(ctx context.Context, rbi typedrbacv1.RoleBindingInterface, rb rbacv1.RoleBinding) error { + if err := rbi.Delete(ctx, rb.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete role binding") + } + } + log.Printf("Creating RoleBinding %v", rb.Name) + if _, err := 
rbi.Create(ctx, &rb, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create role binding") + } + + return nil +} + +func mustCreateConfigMap(ctx context.Context, cmi typedcorev1.ConfigMapInterface, cm corev1.ConfigMap) error { + if err := cmi.Delete(ctx, cm.Name, metav1.DeleteOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + return errors.Wrap(err, "failed to delete configmap") + } + } + log.Printf("Creating ConfigMap %v", cm.Name) + if _, err := cmi.Create(ctx, &cm, metav1.CreateOptions{}); err != nil { + return errors.Wrap(err, "failed to create configmap") + } + + return nil +} + +func MustScaleDeployment(ctx context.Context, + deploymentsClient typedappsv1.DeploymentInterface, + deployment appsv1.Deployment, + clientset *kubernetes.Clientset, + namespace, + podLabelSelector string, + replicas int, + skipWait bool, +) error { + log.Printf("Scaling deployment %v to %v replicas", deployment.Name, replicas) + err := MustUpdateReplica(ctx, deploymentsClient, deployment.Name, int32(replicas)) + if err != nil { + return errors.Wrap(err, "failed to scale deployment") + } + + if !skipWait { + log.Printf("Waiting for pods to be ready..") + err = WaitForPodDeployment(ctx, clientset, namespace, deployment.Name, podLabelSelector, replicas) + if err != nil { + return errors.Wrap(err, "failed to wait for pod deployment") + } + } + return nil +} + +func MustCreateNamespace(ctx context.Context, clientset *kubernetes.Clientset, namespace string) error { + _, err := clientset.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + }, metav1.CreateOptions{}) + if err != nil { + return errors.Wrapf(err, "failed to create namespace %v", namespace) + } + return nil +} + +func InstallCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, logDir string) (func() error, error) { + cniDropgzVersion := os.Getenv(envCNIDropgzVersion) + cnsVersion := os.Getenv(envCNSVersion) + + cns, err := loadCNSDaemonset(ctx, clientset, cnsVersion, cniDropgzVersion) + if err != nil { + return nil, errors.Wrap(err, "failed to load CNS daemonset") + } + + cleanupds := func() error { + if err := ExportLogsByLabelSelector(ctx, clientset, cns.Namespace, cnsLabelSelector, logDir); err != nil { + return errors.Wrapf(err, "failed to export logs by label selector %s", cnsLabelSelector) + } + return nil + } + + return cleanupds, nil +} + +func hostPathTypePtr(h corev1.HostPathType) *corev1.HostPathType { + return &h +} + +func volumesForAzureCNIOverlay() []corev1.Volume { + return []corev1.Volume{ + { + Name: "log", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/log/azure-cns", + Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), + }, + }, + }, + { + Name: "cns-state", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/lib/azure-network", + Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), + }, + }, + }, + { + Name: "cni-bin", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/opt/cni/bin", + Type: hostPathTypePtr(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "azure-vnet", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/azure-vnet", + Type: hostPathTypePtr(corev1.HostPathDirectoryOrCreate), + }, + }, + }, + { + Name: "cni-lock", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/lock/azure-vnet", + Type: 
hostPathTypePtr(corev1.HostPathDirectoryOrCreate), + }, + }, + }, + { + Name: "legacy-cni-state", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/var/run/azure-vnet.json", + Type: hostPathTypePtr(corev1.HostPathFileOrCreate), + }, + }, + }, + { + Name: "cni-conflist", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/etc/cni/net.d", + Type: hostPathTypePtr(corev1.HostPathDirectory), + }, + }, + }, + { + Name: "cns-config", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "cns-config", + }, + }, + }, + }, + } +} + +func dropgzVolumeMountsForAzureCNIOverlay() []corev1.VolumeMount { + return []corev1.VolumeMount{ + { + Name: "cni-bin", + MountPath: "/opt/cni/bin", + }, + } +} + +func cnsVolumeMountsForAzureCNIOverlay() []corev1.VolumeMount { + return []corev1.VolumeMount{ + { + Name: "log", + MountPath: "/var/log", + }, + { + Name: "cns-state", + MountPath: "/var/lib/azure-network", + }, + { + Name: "cns-config", + MountPath: "/etc/azure-cns", + }, + { + Name: "cni-bin", + MountPath: "/opt/cni/bin", + }, + { + Name: "azure-vnet", + MountPath: "/var/run/azure-vnet", + }, + { + Name: "cni-lock", + MountPath: "/var/lock/azure-vnet", + }, + { + Name: "legacy-cni-state", + MountPath: "/var/run/azure-vnet.json", + }, + { + Name: "cni-conflist", + MountPath: "/etc/cni/net.d", + }, + } +} + +func loadCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, cnsVersion, cniDropgzVersion string) (appsv1.DaemonSet, error) { + cns, err := MustParseDaemonSet(cnsDaemonSetPath) + if err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, "failed to parse daemonset") + } + + image, _ := ParseImageString(cns.Spec.Template.Spec.Containers[0].Image) + cns.Spec.Template.Spec.Containers[0].Image = GetImageString(image, cnsVersion) + + log.Printf("Checking environment scenario") + cns = loadDropgzImage(cns, cniDropgzVersion) + + for cnsScenario := range cnsScenarioMap { + cns, err = setupCNSDaemonset(ctx, clientset, cns, cnsScenario) + if err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, fmt.Sprintf("failed to setup %s cns scenario", cnsScenario)) + } + } + + cnsDaemonsetClient := clientset.AppsV1().DaemonSets(cns.Namespace) + + log.Printf("Installing CNS with image %s", cns.Spec.Template.Spec.Containers[0].Image) + + // setup common RBAC, ClusterRole, ClusterRoleBinding, ServiceAccount + if _, err := MustSetUpClusterRBAC(ctx, clientset, cnsClusterRolePath, cnsClusterRoleBindingPath, cnsServiceAccountPath); err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, "failed to setup common RBAC, ClusterRole, ClusterRoleBinding and ServiceAccount") + } + + // setup RBAC, Role, RoleBinding + if err := MustSetUpRBAC(ctx, clientset, cnsRolePath, cnsRoleBindingPath); err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, "failed to setup RBAC, Role and RoleBinding") + } + + if err := MustCreateDaemonset(ctx, cnsDaemonsetClient, cns); err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, "failed to create daemonset") + } + + if err := WaitForPodDaemonset(ctx, clientset, cns.Namespace, cns.Name, cnsLabelSelector); err != nil { + return appsv1.DaemonSet{}, errors.Wrap(err, "failed to verify daemonset is running") + } + + return cns, nil +} + +func setupCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, cns appsv1.DaemonSet, flag string) (appsv1.DaemonSet, error) { + cnsScenarioConfig, ok := 
+
+func setupCNSDaemonset(ctx context.Context, clientset *kubernetes.Clientset, cns appsv1.DaemonSet, flag string) (appsv1.DaemonSet, error) {
+	cnsScenarioConfig, ok := cnsScenarioMap[flag]
+	if !ok {
+		return cns, errors.Wrap(ErrUnsupportedCNSScenario, fmt.Sprintf("%s is not a supported cns scenario", flag))
+	}
+
+	flagValue := os.Getenv(flag)
+
+	if scenario, err := strconv.ParseBool(flagValue); err == nil && scenario {
+		log.Printf("Env %v set to true", flag)
+
+		// override init container args
+		cns.Spec.Template.Spec.InitContainers[0].Args = cnsScenarioConfig.initContainerArgs
+
+		// override the volumes and volume mounts (if present)
+		if len(cnsScenarioConfig.volumes) > 0 {
+			cns.Spec.Template.Spec.Volumes = cnsScenarioConfig.volumes
+		}
+		if len(cnsScenarioConfig.initContainerVolumeMounts) > 0 {
+			cns.Spec.Template.Spec.InitContainers[0].VolumeMounts = cnsScenarioConfig.initContainerVolumeMounts
+		}
+		if len(cnsScenarioConfig.containerVolumeMounts) > 0 {
+			cns.Spec.Template.Spec.Containers[0].VolumeMounts = cnsScenarioConfig.containerVolumeMounts
+		}
+
+		// setup the CNS configmap
+		if err := MustSetupConfigMap(ctx, clientset, cnsScenarioConfig.configMapPath); err != nil {
+			return cns, errors.Wrap(err, fmt.Sprintf("failed to setup CNS %s configMap", cnsScenarioConfig.configMapPath))
+		}
+	} else {
+		log.Printf("Env %v not set to true, skipping", flag)
+	}
+	return cns, nil
+}
+
+func loadDropgzImage(cns appsv1.DaemonSet, dropgzVersion string) appsv1.DaemonSet {
+	installFlag := os.Getenv(envTestDropgz)
+	if testDropgzScenario, err := strconv.ParseBool(installFlag); err == nil && testDropgzScenario {
+		log.Printf("Env %v set to true, deploy cniTest.Dockerfile", envTestDropgz)
+		initImage, _ := ParseImageString("acnpublic.azurecr.io/cni-dropgz-test:latest")
+		cns.Spec.Template.Spec.InitContainers[0].Image = GetImageString(initImage, dropgzVersion)
+	} else {
+		log.Printf("Env %v not set to true, deploying cni.Dockerfile", envTestDropgz)
+		initImage, _ := ParseImageString(cns.Spec.Template.Spec.InitContainers[0].Image)
+		cns.Spec.Template.Spec.InitContainers[0].Image = GetImageString(initImage, dropgzVersion)
+	}
+	return cns
+}
diff --git a/test/internal/k8sutils/utils_delete.go b/test/internal/kubernetes/utils_delete.go
similarity index 91%
rename from test/internal/k8sutils/utils_delete.go
rename to test/internal/kubernetes/utils_delete.go
index 1032b406eb..ef0bc481fb 100644
--- a/test/internal/k8sutils/utils_delete.go
+++ b/test/internal/kubernetes/utils_delete.go
@@ -1,4 +1,4 @@
-package k8sutils
+package kubernetes
 
 import (
 	"context"
@@ -25,7 +25,7 @@ func MustDeletePod(ctx context.Context, podI typedcorev1.PodInterface, pod corev
 func mustDeleteDaemonset(ctx context.Context, daemonsets typedappsv1.DaemonSetInterface, ds appsv1.DaemonSet) error {
 	if err := daemonsets.Delete(ctx, ds.Name, metav1.DeleteOptions{}); err != nil {
 		if !apierrors.IsNotFound(err) {
-			return err
+			return errors.Wrap(err, "failed to delete daemonset")
 		}
 	}
 
@@ -35,7 +35,7 @@ func mustDeleteDeployment(ctx context.Context, deployments typedappsv1.DeploymentIn
 	if err := deployments.Delete(ctx, d.Name, metav1.DeleteOptions{}); err != nil {
 		if !apierrors.IsNotFound(err) {
-			return err
+			return errors.Wrap(err, "failed to delete deployment")
 		}
 	}
 
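
The utils_delete.go hunks above replace bare `return err` with wrapped errors. For reference, github.com/pkg/errors (already imported by these files) adds context and a stack trace while keeping the original cause recoverable; a small standalone example:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        cause := fmt.Errorf("connection refused")
        err := errors.Wrap(cause, "failed to delete daemonset")

        fmt.Println(err)               // failed to delete daemonset: connection refused
        fmt.Println(errors.Cause(err)) // connection refused
        fmt.Printf("%+v\n", err)       // message, cause, and stack trace
    }
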
diff --git a/test/internal/k8sutils/utils_get.go b/test/internal/kubernetes/utils_get.go
similarity index 95%
rename from test/internal/k8sutils/utils_get.go
rename to test/internal/kubernetes/utils_get.go
index 6c1ff2b0e6..ce4e569362 100644
--- a/test/internal/k8sutils/utils_get.go
+++ b/test/internal/kubernetes/utils_get.go
@@ -1,4 +1,4 @@
-package k8sutils
+package kubernetes
 
 import (
 	"context"
@@ -41,7 +41,7 @@ func GetPodsByNode(ctx context.Context, clientset *kubernetes.Clientset, namespa
 func GetPodsIpsByNode(ctx context.Context, clientset *kubernetes.Clientset, namespace, labelselector, nodeName string) ([]string, error) {
 	pods, err := GetPodsByNode(ctx, clientset, namespace, labelselector, nodeName)
 	if err != nil {
-		return nil, err
+		return nil, errors.Wrap(err, "failed to get pods by node")
 	}
 	ips := make([]string, 0, len(pods.Items)*2) //nolint
 	for index := range pods.Items {
diff --git a/test/internal/k8sutils/utils_parse.go b/test/internal/kubernetes/utils_parse.go
similarity index 98%
rename from test/internal/k8sutils/utils_parse.go
rename to test/internal/kubernetes/utils_parse.go
index 9113788d35..a69c360a7a 100644
--- a/test/internal/k8sutils/utils_parse.go
+++ b/test/internal/kubernetes/utils_parse.go
@@ -1,4 +1,4 @@
-package k8sutils
+package kubernetes
 
 import (
 	appsv1 "k8s.io/api/apps/v1"
diff --git a/test/validate/utils.go b/test/validate/utils.go
index 4c81fe145a..70d4d699d9 100644
--- a/test/validate/utils.go
+++ b/test/validate/utils.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"reflect"
 
-	"github.com/Azure/azure-container-networking/test/internal/k8sutils"
+	acnk8s "github.com/Azure/azure-container-networking/test/internal/kubernetes"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
 )
@@ -26,7 +26,7 @@ func compareIPs(expected map[string]string, actual []string) bool {
 
 // func to get the pods ip without the node ip (ie. host network as false)
 func getPodIPsWithoutNodeIP(ctx context.Context, clientset *kubernetes.Clientset, node corev1.Node) []string {
 	podsIpsWithoutNodeIP := []string{}
-	podIPs, err := k8sutils.GetPodsIpsByNode(ctx, clientset, "", "", node.Name)
+	podIPs, err := acnk8s.GetPodsIpsByNode(ctx, clientset, "", "", node.Name)
 	if err != nil {
 		return podsIpsWithoutNodeIP
 	}
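
The acnk8s alias introduced above is not cosmetic: after the rename, the helper package is also named kubernetes, which collides with k8s.io/client-go/kubernetes, so any file importing both must alias one of them. A minimal sketch of the convention (illustrative only; listNodesExample is a hypothetical caller, not a function in the repo):

    package validate

    import (
        "context"

        acnk8s "github.com/Azure/azure-container-networking/test/internal/kubernetes"
        "k8s.io/client-go/kubernetes"
    )

    // listNodesExample: clientset comes from client-go, while GetNodeList
    // comes from the renamed ACN helper package.
    func listNodesExample(ctx context.Context, clientset *kubernetes.Clientset) error {
        _, err := acnk8s.GetNodeList(ctx, clientset)
        return err
    }
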
diff --git a/test/validate/validate.go b/test/validate/validate.go
index d5e5f851c0..cf91abd73f 100644
--- a/test/validate/validate.go
+++ b/test/validate/validate.go
@@ -4,7 +4,7 @@ import (
 	"context"
 	"log"
 
-	k8sutils "github.com/Azure/azure-container-networking/test/internal/k8sutils"
+	acnk8s "github.com/Azure/azure-container-networking/test/internal/kubernetes"
 	"github.com/pkg/errors"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes"
@@ -46,16 +46,16 @@ type check struct {
 
 func CreateValidator(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, namespace, cni string, restartCase bool, os string) (*Validator, error) {
 	// deploy privileged pod
-	privilegedDaemonSet, err := k8sutils.MustParseDaemonSet(privilegedDaemonSetPathMap[os])
+	privilegedDaemonSet, err := acnk8s.MustParseDaemonSet(privilegedDaemonSetPathMap[os])
 	if err != nil {
 		return nil, errors.Wrap(err, "unable to parse daemonset")
 	}
 	daemonsetClient := clientset.AppsV1().DaemonSets(privilegedNamespace)
-	if err := k8sutils.MustCreateDaemonset(ctx, daemonsetClient, privilegedDaemonSet); err != nil {
+	if err := acnk8s.MustCreateDaemonset(ctx, daemonsetClient, privilegedDaemonSet); err != nil {
 		return nil, errors.Wrap(err, "unable to create daemonset")
 	}
 	// Ensures that pods have been replaced if test is re-run after failure
-	if err := k8sutils.WaitForPodDaemonset(ctx, clientset, privilegedNamespace, privilegedDaemonSet.Name, privilegedLabelSelector); err != nil {
+	if err := acnk8s.WaitForPodDaemonset(ctx, clientset, privilegedNamespace, privilegedDaemonSet.Name, privilegedLabelSelector); err != nil {
 		return nil, errors.Wrap(err, "unable to wait for daemonset")
 	}
 
@@ -109,25 +109,25 @@ func (v *Validator) ValidateStateFile(ctx context.Context) error {
 }
 
 func (v *Validator) ValidateRestartNetwork(ctx context.Context) error {
-	nodes, err := k8sutils.GetNodeList(ctx, v.clientset)
+	nodes, err := acnk8s.GetNodeList(ctx, v.clientset)
 	if err != nil {
 		return errors.Wrapf(err, "failed to get node list")
 	}
 
 	for index := range nodes.Items {
 		// get the privileged pod
-		pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, privilegedNamespace, privilegedLabelSelector, nodes.Items[index].Name)
+		pod, err := acnk8s.GetPodsByNode(ctx, v.clientset, privilegedNamespace, privilegedLabelSelector, nodes.Items[index].Name)
 		if err != nil {
 			return errors.Wrapf(err, "failed to get privileged pod")
 		}
 
 		privelegedPod := pod.Items[0]
 		// exec into the pod to get the state file
-		_, err = k8sutils.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privelegedPod.Name, restartNetworkCmd, v.config)
+		_, err = acnk8s.ExecCmdOnPod(ctx, v.clientset, privilegedNamespace, privelegedPod.Name, restartNetworkCmd, v.config)
 		if err != nil {
 			return errors.Wrapf(err, "failed to exec into privileged pod - %s", privelegedPod.Name)
 		}
-		err = k8sutils.WaitForPodsRunning(ctx, v.clientset, "", "")
+		err = acnk8s.WaitForPodsRunning(ctx, v.clientset, "", "")
 		if err != nil {
 			return errors.Wrapf(err, "failed to wait for pods running")
 		}
@@ -137,20 +137,20 @@ func (v *Validator) ValidateRestartNetwork(ctx context.Context) error {
 
 func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFunc, cmd []string, checkType, namespace, labelSelector string) error {
 	log.Printf("Validating %s state file", checkType)
-	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os])
+	nodes, err := acnk8s.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os])
 	if err != nil {
 		return errors.Wrapf(err, "failed to get node list")
 	}
 
 	for index := range nodes.Items {
 		// get the privileged pod
-		pod, err := k8sutils.GetPodsByNode(ctx, v.clientset, namespace, labelSelector, nodes.Items[index].Name)
+		pod, err := acnk8s.GetPodsByNode(ctx, v.clientset, namespace, labelSelector, nodes.Items[index].Name)
 		if err != nil {
 			return errors.Wrapf(err, "failed to get privileged pod")
 		}
 
 		podName := pod.Items[0].Name
 		// exec into the pod to get the state file
-		result, err := k8sutils.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config)
+		result, err := acnk8s.ExecCmdOnPod(ctx, v.clientset, namespace, podName, cmd, v.config)
 		if err != nil {
 			return errors.Wrapf(err, "failed to exec into privileged pod - %s", podName)
 		}
@@ -177,7 +177,7 @@ func (v *Validator) validateIPs(ctx context.Context, stateFileIps stateFileIpsFu
 
 func (v *Validator) validateDualStackNodeProperties(ctx context.Context) error {
 	log.Print("Validating Dualstack Overlay Node properties")
-	nodes, err := k8sutils.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os])
+	nodes, err := acnk8s.GetNodeListByLabelSelector(ctx, v.clientset, nodeSelectorMap[v.os])
 	if err != nil {
 		return errors.Wrapf(err, "failed to get node list")
 	}
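
Both ValidateRestartNetwork and validateIPs above rely on acnk8s.ExecCmdOnPod to run commands inside the privileged pods. That helper's internals are outside this diff; the sketch below shows the standard client-go exec flow such a helper typically wraps (an assumed shape, not the repo's actual implementation):

    package podexec

    import (
        "bytes"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/kubernetes/scheme"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/remotecommand"
    )

    // execCmdOnPod is a hypothetical ExecCmdOnPod-style helper: POST to the
    // pod's exec subresource, then stream stdout/stderr over SPDY.
    func execCmdOnPod(clientset *kubernetes.Clientset, config *rest.Config, namespace, pod string, cmd []string) ([]byte, error) {
        req := clientset.CoreV1().RESTClient().Post().
            Resource("pods").
            Namespace(namespace).
            Name(pod).
            SubResource("exec").
            VersionedParams(&corev1.PodExecOptions{
                Command: cmd,
                Stdout:  true,
                Stderr:  true,
            }, scheme.ParameterCodec)

        exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
        if err != nil {
            return nil, err
        }

        var stdout, stderr bytes.Buffer
        if err := exec.Stream(remotecommand.StreamOptions{Stdout: &stdout, Stderr: &stderr}); err != nil {
            return nil, err
        }
        return stdout.Bytes(), nil
    }
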