diff --git a/multicluster/test/e2e/fixtures.go b/multicluster/test/e2e/fixtures.go index 25942e6d220..c8b472910ef 100644 --- a/multicluster/test/e2e/fixtures.go +++ b/multicluster/test/e2e/fixtures.go @@ -26,7 +26,7 @@ func createDirectory(path string) error { return os.Mkdir(path, 0700) } -func (data *TestData) setupLogDirectoryForTest(testName string) error { +func (data *MCTestData) setupLogDirectoryForTest(testName string) error { path := filepath.Join(testOptions.logsExportDir, testName) // remove directory if it already exists. This ensures that we start with an empty // directory @@ -39,7 +39,7 @@ func (data *TestData) setupLogDirectoryForTest(testName string) error { return nil } -func setupTest(tb testing.TB) (*TestData, error) { +func setupTest(tb testing.TB) (*MCTestData, error) { if err := testData.setupLogDirectoryForTest(tb.Name()); err != nil { tb.Errorf("Error creating logs directory '%s': %v", testData.logsDirForTestCase, err) return nil, err @@ -51,7 +51,7 @@ func setupTest(tb testing.TB) (*TestData, error) { } }() tb.Logf("Creating '%s' K8s Namespace", multiClusterTestNamespace) - if err := testData.createTestNamespace(); err != nil { + if err := testData.createTestNamespaces(); err != nil { return nil, err } @@ -59,13 +59,13 @@ func setupTest(tb testing.TB) (*TestData, error) { return testData, nil } -func teardownTest(tb testing.TB, data *TestData) { +func teardownTest(tb testing.TB, data *MCTestData) { if empty, _ := IsDirEmpty(data.logsDirForTestCase); empty { _ = os.Remove(data.logsDirForTestCase) } } -func createPodWrapper(tb testing.TB, data *TestData, cluster string, namespace string, name string, image string, ctr string, command []string, +func createPodWrapper(tb testing.TB, data *MCTestData, cluster string, namespace string, name string, image string, ctr string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(pod *corev1.Pod)) error { tb.Logf("Creating Pod '%s'", 
name) if err := data.createPod(cluster, name, namespace, ctr, image, command, args, env, ports, hostNetwork, mutateFunc); err != nil { @@ -79,14 +79,14 @@ func createPodWrapper(tb testing.TB, data *TestData, cluster string, namespace s return err } -func deletePodWrapper(tb testing.TB, data *TestData, clusterName string, namespace string, name string) { +func deletePodWrapper(tb testing.TB, data *MCTestData, clusterName string, namespace string, name string) { tb.Logf("Deleting Pod '%s'", name) if err := data.deletePod(clusterName, namespace, name); err != nil { tb.Logf("Error when deleting Pod: %v", err) } } -func deleteServiceWrapper(tb testing.TB, data *TestData, clusterName string, namespace string, name string) { +func deleteServiceWrapper(tb testing.TB, data *MCTestData, clusterName string, namespace string, name string) { tb.Logf("Deleting Service '%s'", name) if err := data.deleteService(clusterName, namespace, name); err != nil { tb.Logf("Error when deleting Service: %v", err) diff --git a/multicluster/test/e2e/framework.go b/multicluster/test/e2e/framework.go index 521a5604cb6..8fb3ed433ca 100644 --- a/multicluster/test/e2e/framework.go +++ b/multicluster/test/e2e/framework.go @@ -15,8 +15,6 @@ package e2e import ( - "bytes" - "context" "fmt" "math/rand" "os" @@ -25,18 +23,8 @@ import ( log "github.com/sirupsen/logrus" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/remotecommand" crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" - crdclientset "antrea.io/antrea/pkg/client/clientset/versioned" antreae2e "antrea.io/antrea/test/e2e" "antrea.io/antrea/test/e2e/providers" ) @@ -47,7 +35,6 @@ var ( const ( defaultTimeout = 90 * time.Second - 
defaultInterval = 1 * time.Second importServiceDelay = 10 * time.Second multiClusterTestNamespace string = "antrea-multicluster-test" @@ -75,24 +62,15 @@ type TestOptions struct { var testOptions TestOptions -type TestData struct { - kubeconfigs map[string]*restclient.Config - clients map[string]kubernetes.Interface - crdClients map[string]crdclientset.Interface - clusters []string - +type MCTestData struct { + clusters []string + clusterTestDataMap map[string]antreae2e.TestData logsDirForTestCase string } -var testData *TestData - -func (data *TestData) createClients() error { - data.clients = make(map[string]kubernetes.Interface) - data.kubeconfigs = make(map[string]*restclient.Config) - data.crdClients = make(map[string]crdclientset.Interface) - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - configOverrides := &clientcmd.ConfigOverrides{} +var testData *MCTestData +func (data *MCTestData) createClients() error { kubeConfigPaths := []string{ testOptions.leaderClusterKubeConfigPath, testOptions.eastClusterKubeConfigPath, @@ -101,106 +79,130 @@ func (data *TestData) createClients() error { data.clusters = []string{ leaderCluster, eastCluster, westCluster, } + data.clusterTestDataMap = map[string]antreae2e.TestData{} for i, cluster := range data.clusters { - loadingRules.ExplicitPath = kubeConfigPaths[i] - kubeConfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides).ClientConfig() - if err != nil { - return fmt.Errorf("error when building kube config of cluster %s: %v", cluster, err) - } - clusterClient, err := kubernetes.NewForConfig(kubeConfig) - if err != nil { - return fmt.Errorf("error when creating Kubernetes client of cluster %s: %v", cluster, err) + testData := antreae2e.TestData{} + if err := testData.CreateClient(kubeConfigPaths[i]); err != nil { + return fmt.Errorf("error initializing clients for cluster %s: %v", cluster, err) } - crdClient, err := crdclientset.NewForConfig(kubeConfig) - if err 
!= nil { - return fmt.Errorf("error when creating crd client of cluster %s: %v", cluster, err) + data.clusterTestDataMap[cluster] = testData + } + return nil +} + +func (data *MCTestData) initProviders() error { + for cluster, d := range data.clusterTestDataMap { + if err := d.InitProvider("remote", "multicluster"); err != nil { + log.Errorf("Failed to initialize provider for cluster %s", cluster) + return err } - data.kubeconfigs[cluster] = kubeConfig - data.clients[cluster] = clusterClient - data.crdClients[cluster] = crdClient } + provider, _ = providers.NewRemoteProvider("multicluster") return nil } -func (data *TestData) createTestNamespace() error { - for _, client := range data.clients { - if err := createNamespace(client, multiClusterTestNamespace, nil); err != nil { +func (data *MCTestData) createTestNamespaces() error { + for cluster, d := range data.clusterTestDataMap { + if err := d.CreateNamespace(multiClusterTestNamespace, nil); err != nil { + log.Errorf("Failed to create test namespace for cluster %s", cluster) return err } } return nil } -func (data *TestData) deletePod(clusterName string, namespace string, name string) error { - client := data.getClientOfCluster(clusterName) - return deletePod(client, namespace, name) +func (data *MCTestData) deletePod(clusterName string, namespace string, name string) error { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + if err := d.DeletePod(namespace, name); err != nil { + return err + } + } + return nil } -func (data *TestData) deleteService(clusterName string, namespace string, name string) error { - client := data.getClientOfCluster(clusterName) - return deleteService(client, namespace, name) +func (data *MCTestData) deleteService(clusterName string, namespace string, name string) error { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + if err := d.DeleteService(namespace, name); err != nil { + return err + } + } + return nil } -func (data *TestData) deleteTestNamespace(timeout 
time.Duration) error { - return data.deleteNamespaceInAllClusters(multiClusterTestNamespace, timeout) +func (data *MCTestData) getService(clusterName string, namespace string, name string) (*corev1.Service, error) { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + return d.GetService(namespace, name) + } + return nil, fmt.Errorf("clusterName %s not found", clusterName) } -func (data *TestData) deleteNamespace(clusterName string, namespace string, timeout time.Duration) error { - client := data.getClientOfCluster(clusterName) - return deleteNamespace(client, namespace, timeout) +func (data *MCTestData) deleteTestNamespaces(timeout time.Duration) error { + var failedClusters []string + for cluster, d := range data.clusterTestDataMap { + if err := d.DeleteNamespace(multiClusterTestNamespace, timeout); err != nil { + failedClusters = append(failedClusters, cluster) + } + } + if len(failedClusters) > 0 { + return fmt.Errorf("failed to delete mcs test namespace in clusters %v", failedClusters) + } + return nil } -func (data *TestData) deleteNamespaceInAllClusters(namespace string, timeout time.Duration) error { - for _, client := range data.clients { - if err := deleteNamespace(client, namespace, timeout); err != nil { +func (data *MCTestData) deleteNamespace(clusterName string, namespace string, timeout time.Duration) error { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + if err := d.DeleteNamespace(namespace, timeout); err != nil { return err } } return nil } -func (data *TestData) createPod(clusterName string, name string, namespace string, ctrName string, image string, command []string, +func (data *MCTestData) createPod(clusterName string, name string, namespace string, ctrName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(pod *corev1.Pod)) error { - client := data.getClientOfCluster(clusterName) - return createPod(client, name, namespace, ctrName, 
image, command, args, env, ports, hostNetwork, mutateFunc) -} - -func (data *TestData) getService(clusterName string, namespace string, serviceName string) (*corev1.Service, error) { - client := data.getClientOfCluster(clusterName) - return getService(client, namespace, serviceName) + if d, ok := data.clusterTestDataMap[clusterName]; ok { + if err := d.CreatePodOnNodeInNamespace(name, namespace, "", ctrName, image, command, args, env, ports, hostNetwork, mutateFunc); err != nil { + return err + } + } + return nil } -func (data *TestData) createService(cluster string, serviceName string, namespace string, port int32, targetPort int32, +func (data *MCTestData) createService(clusterName string, serviceName string, namespace string, port int32, targetPort int32, protocol corev1.Protocol, selector map[string]string, affinity bool, nodeLocalExternal bool, serviceType corev1.ServiceType, ipFamily *corev1.IPFamily, annotation map[string]string) (*corev1.Service, error) { - client := data.getClientOfCluster(cluster) - return createService(client, serviceName, namespace, port, targetPort, protocol, selector, affinity, nodeLocalExternal, serviceType, ipFamily, annotation) + if d, ok := data.clusterTestDataMap[clusterName]; ok { + svc, err := d.CreateServiceWithAnnotations(serviceName, namespace, port, targetPort, protocol, selector, affinity, nodeLocalExternal, serviceType, ipFamily, annotation) + if err != nil { + return nil, err + } + return svc, nil + } + return nil, fmt.Errorf("clusterName %s not found", clusterName) } -func (data *TestData) getClientOfCluster(clusterName string) kubernetes.Interface { - return data.clients[clusterName] +func (data *MCTestData) createOrUpdateANP(clusterName string, anp *crdv1alpha1.NetworkPolicy) (*crdv1alpha1.NetworkPolicy, error) { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + return d.CreateOrUpdateANP(anp) + } + return nil, fmt.Errorf("clusterName %s not found", clusterName) } -func (data *TestData) 
getCRDClientOfCluster(clusterName string) crdclientset.Interface { - return data.crdClients[clusterName] +// deleteANP is a convenience function for deleting ANP by name and Namespace. +func (data *MCTestData) deleteANP(clusterName string, ns, name string) error { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + return d.DeleteANP(ns, name) + } + return fmt.Errorf("clusterName %s not found", clusterName) } -type PodCondition func(*corev1.Pod) (bool, error) - // podWaitFor polls the K8s apiserver until the specified Pod is found (in the test Namespace) and // the condition predicate is met (or until the provided timeout expires). -func (data *TestData) podWaitFor(timeout time.Duration, clusterName string, name string, namespace string, condition PodCondition) (*corev1.Pod, error) { - client := data.getClientOfCluster(clusterName) - return podWaitFor(client, timeout, name, namespace, condition) -} - -func initProvider() error { - newProvider, err := providers.NewRemoteProvider("multicluster") - if err != nil { - return err +func (data *MCTestData) podWaitFor(timeout time.Duration, clusterName string, name string, namespace string, condition antreae2e.PodCondition) (*corev1.Pod, error) { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + return d.PodWaitFor(timeout, name, namespace, condition) } - provider = newProvider - return nil + return nil, fmt.Errorf("clusterName %s not found", clusterName) } // A DNS-1123 subdomain must consist of lower case alphanumeric characters @@ -220,124 +222,7 @@ func randName(prefix string) string { return prefix + randSeq(nameSuffixLength) } -func createNamespace(client kubernetes.Interface, namespace string, mutateFunc func(namespace2 *corev1.Namespace)) error { - ns := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - }, - } - if mutateFunc != nil { - mutateFunc(ns) - } - - if ns, err := client.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}); err != nil { - // Ignore 
error if the namespace already exists - if !errors.IsAlreadyExists(err) { - return fmt.Errorf("error when creating '%s' Namespace: %v", namespace, err) - } - // When namespace already exists, check phase - if ns.Status.Phase == corev1.NamespaceTerminating { - return fmt.Errorf("error when creating '%s' Namespace: namespace exists but is in 'Terminating' phase", namespace) - } - } - return nil -} - -func deletePod(client kubernetes.Interface, namespace string, name string) error { - var gracePeriodSeconds int64 = 5 - deleteOptions := metav1.DeleteOptions{ - GracePeriodSeconds: &gracePeriodSeconds, - } - - if err := client.CoreV1().Pods(namespace).Delete(context.TODO(), name, deleteOptions); err != nil { - if !errors.IsNotFound(err) { - return err - } - } - - return nil -} - -func deleteService(client kubernetes.Interface, namespace string, name string) error { - var gracePeriodSeconds int64 = 5 - deleteOptions := metav1.DeleteOptions{ - GracePeriodSeconds: &gracePeriodSeconds, - } - - return client.CoreV1().Services(namespace).Delete(context.TODO(), name, deleteOptions) -} - -func deleteNamespace(client kubernetes.Interface, namespace string, timeout time.Duration) error { - var gracePeriodSeconds int64 - var propagationPolicy = metav1.DeletePropagationForeground - deleteOptions := metav1.DeleteOptions{ - GracePeriodSeconds: &gracePeriodSeconds, - PropagationPolicy: &propagationPolicy, - } - - if err := client.CoreV1().Namespaces().Delete(context.TODO(), namespace, deleteOptions); err != nil { - if errors.IsNotFound(err) { - // namespace does not exist, we return right away - return nil - } - return fmt.Errorf("error when deleting '%s' Namespace: %v", namespace, err) - } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { - if ns, err := client.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{}); err != nil { - if errors.IsNotFound(err) { - // Success - return true, nil - } - return false, fmt.Errorf("error when getting 
Namespace '%s' after delete: %v", namespace, err) - } else if ns.Status.Phase != corev1.NamespaceTerminating { - return false, fmt.Errorf("deleted Namespace '%s' should be in 'Terminating' phase", namespace) - } - - // Keep trying - return false, nil - }) - - return err -} - -func createPod(client kubernetes.Interface, name string, namespace string, ctrName string, image string, command []string, - args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(pod *corev1.Pod)) error { - podSpec := corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: ctrName, - Image: image, - ImagePullPolicy: corev1.PullIfNotPresent, - Command: command, - Args: args, - Env: env, - Ports: ports, - }, - }, - RestartPolicy: corev1.RestartPolicyNever, - HostNetwork: hostNetwork, - } - - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{ - "antrea-multicluster-e2e": name, - "app": ctrName, - }, - }, - Spec: podSpec, - } - - if mutateFunc != nil { - mutateFunc(pod) - } - - _, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) - return err -} - -func (data *TestData) probe( +func (data *MCTestData) probeFromPodInCluster( cluster string, podNamespace string, podName string, @@ -374,156 +259,17 @@ func (data *TestData) probe( if stderr == "" { return antreae2e.Error } - return decideProbeResult(stderr, 3) + return antreae2e.DecideProbeResult(stderr, 3) } return antreae2e.Connected } -// decideProbeResult uses the probe stderr to decide the connectivity. 
-func decideProbeResult(stderr string, probeNum int) antreae2e.PodConnectivityMark { - countConnected := probeNum - strings.Count(stderr, "\n") - countDropped := strings.Count(stderr, "TIMEOUT") - // For our UDP rejection cases, agnhost will return: - // For IPv4: 'UNKNOWN: read udp [src]->[dst]: read: no route to host' - // For IPv6: 'UNKNOWN: read udp [src]->[dst]: read: permission denied' - // To avoid incorrect identification, we use 'no route to host' and - // `permission denied`, instead of 'UNKNOWN' as key string. - // For our other protocols rejection cases, agnhost will return 'REFUSED'. - countRejected := strings.Count(stderr, "REFUSED") + strings.Count(stderr, "no route to host") + strings.Count(stderr, "permission denied") - - if countRejected == 0 && countConnected > 0 { - return antreae2e.Connected - } - if countConnected == 0 && countRejected > 0 { - return antreae2e.Rejected - } - if countDropped == probeNum { - return antreae2e.Dropped - } - return antreae2e.Error -} - // Run the provided command in the specified Container for the given Pod and returns the contents of // stdout and stderr as strings. An error either indicates that the command couldn't be run or that // the command returned a non-zero error code. -func (data *TestData) runCommandFromPod(cluster, podNamespace, podName, containerName string, cmd []string) (stdout string, stderr string, err error) { - request := data.clients[cluster].CoreV1().RESTClient().Post(). - Namespace(podNamespace). - Resource("pods"). - Name(podName). - SubResource("exec"). - Param("container", containerName). 
- VersionedParams(&corev1.PodExecOptions{ - Command: cmd, - Stdin: false, - Stdout: true, - Stderr: true, - TTY: false, - }, scheme.ParameterCodec) - exec, err := remotecommand.NewSPDYExecutor(data.kubeconfigs[cluster], "POST", request.URL()) - if err != nil { - return "", "", err +func (data *MCTestData) runCommandFromPod(clusterName, podNamespace, podName, containerName string, cmd []string) (stdout string, stderr string, err error) { + if d, ok := data.clusterTestDataMap[clusterName]; ok { + return d.RunCommandFromPod(podNamespace, podName, containerName, cmd) } - var stdoutB, stderrB bytes.Buffer - if err := exec.Stream(remotecommand.StreamOptions{ - Stdout: &stdoutB, - Stderr: &stderrB, - }); err != nil { - return stdoutB.String(), stderrB.String(), err - } - return stdoutB.String(), stderrB.String(), nil -} - -func createService(client kubernetes.Interface, serviceName string, namespace string, port int32, targetPort int32, - protocol corev1.Protocol, selector map[string]string, affinity bool, nodeLocalExternal bool, serviceType corev1.ServiceType, - ipFamily *corev1.IPFamily, annotation map[string]string) (*corev1.Service, error) { - affinityType := corev1.ServiceAffinityNone - var ipFamilies []corev1.IPFamily - if ipFamily != nil { - ipFamilies = append(ipFamilies, *ipFamily) - } - if affinity { - affinityType = corev1.ServiceAffinityClientIP - } - service := corev1.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceName, - Namespace: namespace, - Labels: map[string]string{ - "antrea-multicluster-e2e": serviceName, - "app": serviceName, - }, - Annotations: annotation, - }, - Spec: corev1.ServiceSpec{ - SessionAffinity: affinityType, - Ports: []corev1.ServicePort{{ - Port: port, - TargetPort: intstr.FromInt(int(targetPort)), - Protocol: protocol, - }}, - Type: serviceType, - Selector: selector, - IPFamilies: ipFamilies, - }, - } - if (serviceType == corev1.ServiceTypeNodePort || serviceType == corev1.ServiceTypeLoadBalancer) && nodeLocalExternal { - 
service.Spec.ExternalTrafficPolicy = corev1.ServiceExternalTrafficPolicyTypeLocal - } - - return client.CoreV1().Services(namespace).Create(context.TODO(), &service, metav1.CreateOptions{}) -} - -func getService(client kubernetes.Interface, namespace string, serviceName string) (*corev1.Service, error) { - svc, err := client.CoreV1().Services(namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}) - if err != nil { - return nil, fmt.Errorf("Error when Getting service %s/%s", namespace, serviceName) - } - return svc, err -} - -func podWaitFor(client kubernetes.Interface, timeout time.Duration, name string, namespace string, condition PodCondition) (*corev1.Pod, error) { - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { - pod, err := client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return false, nil - } - return false, fmt.Errorf("error when getting Pod '%s' in west clsuter: %v", name, err) - } - return condition(pod) - }) - if err != nil { - return nil, err - } - return client.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) -} - -// createOrUpdateANP is a convenience function for updating/creating Antrea NetworkPolicies. 
-func createOrUpdateANP(crdClient crdclientset.Interface, anp *crdv1alpha1.NetworkPolicy) (*crdv1alpha1.NetworkPolicy, error) { - log.Infof("Creating/updating Antrea NetworkPolicy %s/%s", anp.Namespace, anp.Name) - cnpReturned, err := crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Get(context.TODO(), anp.Name, metav1.GetOptions{}) - if err != nil { - log.Debugf("Creating Antrea NetworkPolicy %s", anp.Name) - anp, err = crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Create(context.TODO(), anp, metav1.CreateOptions{}) - if err != nil { - log.Debugf("Unable to create Antrea NetworkPolicy: %s", err) - } - return anp, err - } else if cnpReturned.Name != "" { - log.Debugf("Antrea NetworkPolicy with name %s already exists, updating", anp.Name) - anp, err = crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Update(context.TODO(), anp, metav1.UpdateOptions{}) - return anp, err - } - return nil, fmt.Errorf("error occurred in creating/updating Antrea NetworkPolicy %s", anp.Name) -} - -// deleteANP is a convenience function for deleting ANP by name and Namespace. 
-func deleteANP(crdClient crdclientset.Interface, ns, name string) error { - log.Infof("Deleting Antrea NetworkPolicy '%s/%s'", ns, name) - err := crdClient.CrdV1alpha1().NetworkPolicies(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("unable to delete Antrea NetworkPolicy %s: %v", name, err) - } - return nil + return "", "", fmt.Errorf("clusterName %s not found", clusterName) } diff --git a/multicluster/test/e2e/main_test.go b/multicluster/test/e2e/main_test.go index b6335a4691c..5a1f81ebb3f 100644 --- a/multicluster/test/e2e/main_test.go +++ b/multicluster/test/e2e/main_test.go @@ -65,22 +65,20 @@ func testMain(m *testing.M) int { flag.StringVar(&testOptions.leaderClusterKubeConfigPath, "leader-cluster-kubeconfig-path", path.Join(homedir, ".kube", "leader"), "Kubeconfig Path of the leader cluster") flag.StringVar(&testOptions.eastClusterKubeConfigPath, "east-cluster-kubeconfig-path", path.Join(homedir, ".kube", "east"), "Kubeconfig Path of the east cluster") flag.StringVar(&testOptions.westClusterKubeConfigPath, "west-cluster-kubeconfig-path", path.Join(homedir, ".kube", "west"), "Kubeconfig Path of the west cluster") - flag.Parse() - if err := initProvider(); err != nil { - log.Fatalf("Error when initializing provider: %v", err) - } - cleanupLogging := testOptions.setupLogging() defer cleanupLogging() - testData = &TestData{} - log.Println("Creating k8s clientsets") + testData = &MCTestData{} + log.Println("Creating k8s clientsets for clusterset") if err := testData.createClients(); err != nil { log.Fatalf("Error when creating k8s clientset: %v", err) return 1 } + if err := testData.initProviders(); err != nil { + log.Fatalf("error when initializing providers for clusterset: %v", err) + } rand.Seed(time.Now().UnixNano()) ret := m.Run() diff --git a/multicluster/test/e2e/service_test.go b/multicluster/test/e2e/service_test.go index be7077231a7..930900487a0 100644 --- a/multicluster/test/e2e/service_test.go +++ 
b/multicluster/test/e2e/service_test.go @@ -39,7 +39,7 @@ func TestConnectivity(t *testing.T) { }) } -func testServiceExport(t *testing.T, data *TestData) { +func testServiceExport(t *testing.T, data *MCTestData) { data.testServiceExport(t) } @@ -48,7 +48,7 @@ func testServiceExport(t *testing.T, data *TestData) { // If we got status code 200, it means that the resources is exported by the east cluster // and imported by the west cluster. // TODO(yang): reorg test function contents -func (data *TestData) testServiceExport(t *testing.T) { +func (data *MCTestData) testServiceExport(t *testing.T) { podName := randName("test-nginx-") clientPodName := "test-service-client" @@ -91,7 +91,7 @@ func (data *TestData) testServiceExport(t *testing.T) { eastIP := svc.Spec.ClusterIP if err := data.probeFromCluster(eastCluster, eastIP); err != nil { - t.Fatalf("Error when probe service from %s", eastCluster) + t.Fatalf("Error when probing Service from %s", eastCluster) } svc, err = data.getService(westCluster, multiClusterTestNamespace, fmt.Sprintf("antrea-mc-%s", eastClusterTestService)) if err != nil { @@ -99,7 +99,7 @@ } westIP := svc.Spec.ClusterIP if err := data.probeFromCluster(westCluster, westIP); err != nil { - t.Fatalf("Error when probe service from %s", westCluster) + t.Fatalf("Error when probing Service from %s", westCluster) } if err := data.createPod(eastCluster, clientPodName, multiClusterTestNamespace, "client", agnhostImage, @@ -122,20 +122,20 @@ func (data *TestData) testServiceExport(t *testing.T) { Name: fmt.Sprintf("antrea-mc-%s", westClusterTestService), Namespace: multiClusterTestNamespace}, }, "", nil, crdv1alpha1.RuleActionDrop) - if _, err := createOrUpdateANP(data.getCRDClientOfCluster(eastCluster), anpBuilder.Get()); err != nil { + if _, err := data.createOrUpdateANP(eastCluster, anpBuilder.Get()); err != nil { t.Fatalf("Error creating ANP %s: %v", anpBuilder.Name, err) 
} - defer deleteANP(data.getCRDClientOfCluster(eastCluster), multiClusterTestNamespace, anpBuilder.Name) + defer data.deleteANP(eastCluster, multiClusterTestNamespace, anpBuilder.Name) - connectivity := data.probe(eastCluster, multiClusterTestNamespace, clientPodName, "client", westIP, "westClusterServiceIP", 80, corev1.ProtocolTCP) + connectivity := data.probeFromPodInCluster(eastCluster, multiClusterTestNamespace, clientPodName, "client", westIP, "westClusterServiceIP", 80, corev1.ProtocolTCP) if connectivity == antreae2e.Error { - t.Errorf("Failure -- could not complete probe: %v", err) + t.Errorf("Failure -- could not complete probe: %v", err) } else if connectivity != antreae2e.Dropped { t.Errorf("Failure -- wrong result from probing exported Service after applying toService AntreaNetworkPolicy. Expected: %v, Actual: %v", antreae2e.Dropped, connectivity) } } -func (data *TestData) deployServiceExport(clusterName string) error { +func (data *MCTestData) deployServiceExport(clusterName string) error { var rc int var err error rc, _, _, err = provider.RunCommandOnNode(clusterName, fmt.Sprintf("kubectl apply -f %s", serviceExportYML)) @@ -146,7 +146,7 @@ func (data *TestData) deployServiceExport(clusterName string) error { return nil } -func (data *TestData) deleteServiceExport(clusterName string) error { +func (data *MCTestData) deleteServiceExport(clusterName string) error { var rc int var err error rc, _, _, err = provider.RunCommandOnNode(clusterName, fmt.Sprintf("kubectl delete -f %s", serviceExportYML)) @@ -157,7 +157,7 @@ func (data *TestData) deleteServiceExport(clusterName string) error { return nil } -func (data *TestData) probeFromCluster(clusterName string, url string) error { +func (data *MCTestData) probeFromCluster(clusterName string, url string) error { var rc int var err error rc, _, _, err = provider.RunCommandOnNode(clusterName, fmt.Sprintf("curl --connect-timeout 5 -s %s", url)) diff --git a/test/e2e/antctl_test.go 
b/test/e2e/antctl_test.go index 2a7d37908d6..0529ff2dad1 100644 --- a/test/e2e/antctl_test.go +++ b/test/e2e/antctl_test.go @@ -78,7 +78,7 @@ func runAntctl(podName string, cmds []string, data *TestData) (string, string, e containerName = "antrea-controller" namespace = antreaNamespace } - stdout, stderr, err := data.runCommandFromPod(namespace, podName, containerName, cmds) + stdout, stderr, err := data.RunCommandFromPod(namespace, podName, containerName, cmds) // remove Bincover metadata if needed if err == nil { index := strings.Index(stdout, "START_BINCOVER_METADATA") @@ -127,10 +127,10 @@ func copyAntctlToNode(data *TestData, nodeName string, antctlName string, nodeAn return fmt.Errorf("error when retrieving Antrea Controller Pod: %v", err) } // Just try our best to clean up. - RunCommandOnNode(nodeName, fmt.Sprintf("rm -f %s", nodeAntctlPath)) + data.RunCommandOnNode(nodeName, fmt.Sprintf("rm -f %s", nodeAntctlPath)) // Copy antctl from the controller Pod to the Node. cmd := fmt.Sprintf("kubectl cp %s/%s:/usr/local/bin/%s %s", antreaNamespace, pod.Name, antctlName, nodeAntctlPath) - rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(nodeName, cmd) if err != nil { return fmt.Errorf("error when running command '%s' on Node: %v", cmd, err) } @@ -139,7 +139,7 @@ func copyAntctlToNode(data *TestData, nodeName string, antctlName string, nodeAn } // Make sure the antctl binary is executable on the Node. 
cmd = fmt.Sprintf("chmod +x %s", nodeAntctlPath) - rc, stdout, stderr, err = RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err = data.RunCommandOnNode(nodeName, cmd) if err != nil { return fmt.Errorf("error when running command '%s' on Node: %v", cmd, err) } @@ -195,7 +195,7 @@ func testAntctlControllerRemoteAccess(t *testing.T, data *TestData) { for _, tc := range testCmds { cmd := strings.Join(tc.args, " ") t.Run(cmd, func(t *testing.T) { - rc, stdout, stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil { t.Fatalf("Error when running `%s` from %s: %v\n%s", cmd, controlPlaneNodeName(), err, antctlOutput(stdout, stderr)) } @@ -236,7 +236,7 @@ func testAntctlVerboseMode(t *testing.T, data *TestData) { // runAntctProxy runs the antctl reverse proxy on the provided Node; to stop the // proxy call the returned function. -func runAntctProxy(nodeName string, antctlName string, nodeAntctlPath string, proxyPort int, agentNodeName string, address string) (func() error, error) { +func runAntctProxy(nodeName string, antctlName string, nodeAntctlPath string, proxyPort int, agentNodeName string, address string, data *TestData) (func() error, error) { waitCh := make(chan struct{}) proxyCmd := []string{nodeAntctlPath, "proxy", "--port", fmt.Sprint(proxyPort), "--address", address} if agentNodeName == "" { @@ -245,7 +245,7 @@ func runAntctProxy(nodeName string, antctlName string, nodeAntctlPath string, pr proxyCmd = append(proxyCmd, "--agent-node", agentNodeName) } go func() { - RunCommandOnNode(nodeName, strings.Join(proxyCmd, " ")) + data.RunCommandOnNode(nodeName, strings.Join(proxyCmd, " ")) waitCh <- struct{}{} }() @@ -253,7 +253,7 @@ func runAntctProxy(nodeName string, antctlName string, nodeAntctlPath string, pr // it errors on start. 
time.Sleep(time.Second) cmd := fmt.Sprintf("pgrep %s", antctlName) - rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(nodeName, cmd) if err != nil { return nil, fmt.Errorf("error when running command '%s' on Node: %v", cmd, err) } @@ -264,7 +264,7 @@ func runAntctProxy(nodeName string, antctlName string, nodeAntctlPath string, pr pid := strings.TrimSpace(stdout) return func() error { cmd := fmt.Sprintf("kill -INT %s", pid) - rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(nodeName, cmd) if err != nil { return fmt.Errorf("error when running command '%s' on Node: %v", cmd, err) } @@ -293,7 +293,7 @@ func testAntctlProxy(t *testing.T, data *TestData) { checkAPIAccess := func(url string) error { t.Logf("Checking for API access through antctl proxy") cmd := fmt.Sprintf("curl %s/apis", net.JoinHostPort(url, fmt.Sprint(proxyPort))) - rc, stdout, stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil { return fmt.Errorf("error when running command '%s' on Node: %v", cmd, err) } @@ -323,7 +323,7 @@ func testAntctlProxy(t *testing.T, data *TestData) { t.Skipf("Skipping this testcase since cluster network family doesn't fit") } t.Logf("Starting antctl proxy") - stopProxyFn, err := runAntctProxy(controlPlaneNodeName(), antctlName, nodeAntctlPath, proxyPort, tc.agentNodeName, tc.address) + stopProxyFn, err := runAntctProxy(controlPlaneNodeName(), antctlName, nodeAntctlPath, proxyPort, tc.agentNodeName, tc.address, data) assert.NoError(t, err, "Could not start antctl proxy: %v", err) if err := checkAPIAccess(tc.address); err != nil { t.Errorf("API check failed: %v", err) diff --git a/test/e2e/antreaipam_test.go b/test/e2e/antreaipam_test.go index 89515c3a04c..8086619fdf7 100644 --- a/test/e2e/antreaipam_test.go +++ b/test/e2e/antreaipam_test.go @@ -388,7 
+388,7 @@ func checkStatefulSetIPPoolAllocation(tb testing.TB, data *TestData, name string func deleteAntreaIPAMNamespace(tb testing.TB, data *TestData) { tb.Logf("Deleting '%s' K8s Namespace", testAntreaIPAMNamespace) - if err := data.deleteNamespace(testAntreaIPAMNamespace, defaultTimeout); err != nil { + if err := data.DeleteNamespace(testAntreaIPAMNamespace, defaultTimeout); err != nil { tb.Logf("Error when tearing down test: %v", err) } } diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index c7a29509f4b..e09497babfa 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -1914,7 +1914,7 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { cmd := []string{"cat", logDir + logfileName} if err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) if err != nil || stderr != "" { // file may not exist yet t.Logf("Error when printing the audit log file, err: %v, stderr: %v", err, stderr) @@ -3212,11 +3212,11 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. 
if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } var anp = &crdv1alpha1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: "np1", Labels: map[string]string{"antrea-e2e": "np1"}}, @@ -3274,14 +3274,14 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv4.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 80", serverIPs.ipv6.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 443", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) } wg.Done() }() @@ 
-3347,11 +3347,11 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } var acnp = &crdv1alpha1.ClusterNetworkPolicy{ ObjectMeta: metav1.ObjectMeta{Namespace: testNamespace, Name: "cnp1", Labels: map[string]string{"antrea-e2e": "cnp1"}}, @@ -3409,14 +3409,14 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv4.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 800", serverIPs.ipv6.String())} cmd2 := []string{"/bin/sh", "-c", fmt.Sprintf("echo test | nc -w 4 -u %s 4430", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) + data.RunCommandFromPod(testNamespace, 
clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd2) } wg.Done() }() diff --git a/test/e2e/bandwidth_test.go b/test/e2e/bandwidth_test.go index cccc622828a..52bb5e63b25 100644 --- a/test/e2e/bandwidth_test.go +++ b/test/e2e/bandwidth_test.go @@ -82,7 +82,7 @@ func testBenchmarkBandwidthIntraNode(t *testing.T, data *TestData) { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } podBIP := podBIPs.ipv4.String() - stdout, _, err := data.runCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", podBIP)}) + stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", podBIP)}) if err != nil { t.Fatalf("Error when running iperf3 client: %v", err) } @@ -91,7 +91,7 @@ func testBenchmarkBandwidthIntraNode(t *testing.T, data *TestData) { } func benchmarkBandwidthService(t *testing.T, endpointNode, clientNode string, data *TestData) { - svc, err := data.createService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, v1.ServiceTypeClusterIP, nil) + svc, err := data.CreateService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, v1.ServiceTypeClusterIP, nil) if err != nil { t.Fatalf("Error when creating perftest service: %v", err) } @@ -107,7 +107,7 @@ func benchmarkBandwidthService(t *testing.T, endpointNode, clientNode string, da if err := data.podWaitForRunning(defaultTimeout, "perftest-b", testNamespace); err != nil { t.Fatalf("Error when getting the perftest server Pod's IP: %v", err) } - stdout, stderr, err := data.runCommandFromPod(testNamespace, "perftest-a", perftoolContainerName, []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", 
svc.Spec.ClusterIP)}) + stdout, stderr, err := data.RunCommandFromPod(testNamespace, "perftest-a", perftoolContainerName, []string{"bash", "-c", fmt.Sprintf("iperf3 -c %s|grep sender|awk '{print $7,$8}'", svc.Spec.ClusterIP)}) if err != nil { t.Fatalf("Error when running iperf3 client: %v, stderr: %s", err, stderr) } @@ -134,7 +134,7 @@ func testPodTrafficShaping(t *testing.T, data *TestData) { // So we disable it except for IPv4 single-stack clusters for now. skipIfIPv6Cluster(t) nodeName := controlPlaneNodeName() - skipIfMissingKernelModule(t, nodeName, []string{"ifb", "sch_tbf", "sch_ingress"}) + skipIfMissingKernelModule(t, data, nodeName, []string{"ifb", "sch_tbf", "sch_ingress"}) tests := []struct { name string @@ -185,7 +185,7 @@ func testPodTrafficShaping(t *testing.T, data *TestData) { } runIperf := func(cmd []string) { - stdout, _, err := data.runCommandFromPod(testNamespace, clientPodName, "perftool", cmd) + stdout, _, err := data.RunCommandFromPod(testNamespace, clientPodName, "perftool", cmd) if err != nil { t.Fatalf("Error when running iperf3 client: %v", err) } diff --git a/test/e2e/basic_test.go b/test/e2e/basic_test.go index 8bb8bcdf953..1b26258e0d6 100644 --- a/test/e2e/basic_test.go +++ b/test/e2e/basic_test.go @@ -163,7 +163,7 @@ func (data *TestData) testDeletePod(t *testing.T, podName string, nodeName strin } else { doesInterfaceExist = func() bool { cmd := []string{"ip", "link", "show", ifName} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) if err != nil { if strings.Contains(stderr, "does not exist") { return false @@ -181,7 +181,7 @@ func (data *TestData) testDeletePod(t *testing.T, podName string, nodeName strin } doesIPAllocationExist = func(podIP string) bool { cmd := []string{"test", "-f", "/var/run/antrea/cni/networks/antrea/" + podIP} - _, _, err := 
data.runCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) + _, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) return err == nil } } @@ -388,7 +388,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo cmd = []string{"ip", "-6", "route", "list", "dev", antreaGWName} } podName := antreaPodName() - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, agentContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, agentContainerName, cmd) if err != nil { return nil, fmt.Errorf("error when running ip command in Pod '%s': %v - stdout: %s - stderr: %s", podName, err, stdout, stderr) } @@ -476,7 +476,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo } else { cmd = []string{"ip", "-6", "route", "del", route.peerPodCIDR.String()} } - _, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName(), agentContainerName, cmd) + _, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName(), agentContainerName, cmd) if err != nil { return fmt.Errorf("error when running ip command on Node '%s': %v", nodeName, err) } @@ -490,7 +490,7 @@ func testReconcileGatewayRoutesOnStartup(t *testing.T, data *TestData, isIPv6 bo } else { cmd = []string{"ip", "-6", "route", "add", route.peerPodCIDR.String(), "via", route.peerPodGW.String(), "dev", antreaGWName, "onlink"} } - _, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName(), agentContainerName, cmd) + _, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName(), agentContainerName, cmd) if err != nil { return fmt.Errorf("error when running ip command on Node '%s': %v", nodeName, err) } @@ -584,7 +584,7 @@ func getRoundNumber(data *TestData, podName string) (uint64, error) { return 0, fmt.Errorf("error when marshalling OVSDB query: %v", err) } cmd := []string{"ovsdb-client", "query", string(b)} - stdout, stderr, err := 
data.runCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) if err != nil { return 0, fmt.Errorf("cannot retrieve round number: stderr: <%v>, err: <%v>", stderr, err) } @@ -678,7 +678,7 @@ func testDeletePreviousRoundFlowsOnStartup(t *testing.T, data *TestData) { "ovs-ofctl", "add-flow", defaultBridgeName, fmt.Sprintf("table=0,cookie=%#x,priority=0,actions=drop", cookieID), } - _, stderr, err := data.runCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) + _, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when adding flow: <%v>, err: <%v>", stderr, err) } @@ -692,7 +692,7 @@ func testDeletePreviousRoundFlowsOnStartup(t *testing.T, data *TestData) { // ignore potential error as it is possible for the container to exit with code 137 // if the container does not restart properly, we will know when we try to get the // new round number below. 
- data.runCommandFromPod(antreaNamespace, podName, agentContainerName, cmd) + data.RunCommandFromPod(antreaNamespace, podName, agentContainerName, cmd) } t.Logf("Restarting antrea-agent container on Node %s", nodeName) stopAgent() @@ -714,7 +714,7 @@ func testDeletePreviousRoundFlowsOnStartup(t *testing.T, data *TestData) { "ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=0,cookie=%#x/%#x", cookieID, cookieMask), } - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when dumping flows: <%v>, err: <%v>", stderr, err) } @@ -764,7 +764,7 @@ func testGratuitousARP(t *testing.T, data *TestData, namespace string) { time.Sleep(100 * time.Millisecond) cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=10,arp,arp_spa=%s", podIP.ipv4.String())} - stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("Error when querying openflow: %v", err) } diff --git a/test/e2e/batch_test.go b/test/e2e/batch_test.go index 49ebcaf6b38..f66121f5e87 100644 --- a/test/e2e/batch_test.go +++ b/test/e2e/batch_test.go @@ -42,12 +42,12 @@ func TestBatchCreatePods(t *testing.T) { getFDs := func() string { // In case that antrea-agent is not running as Pid 1 in future. cmds := []string{"pgrep", "-o", "antrea-agent"} - pid, _, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) + pid, _, err := data.RunCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) assert.NoError(t, err) // Ignore the difference of modification time by specifying "--time-style +". 
cmds = []string{"ls", "-l", "--time-style", "+", fmt.Sprintf("/proc/%s/fd/", strings.TrimSpace(pid))} - stdout, _, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) assert.NoError(t, err) return stdout } diff --git a/test/e2e/connectivity_test.go b/test/e2e/connectivity_test.go index 15bfb8d97d5..1f9b500dcda 100644 --- a/test/e2e/connectivity_test.go +++ b/test/e2e/connectivity_test.go @@ -154,7 +154,7 @@ func (data *TestData) testHostPortPodConnectivity(t *testing.T, clientNamespace, } defer deletePodWrapper(t, data, serverNamespace, hpPodName) // Retrieve the IP Address of the Node on which the Pod is scheduled. - hpPod, err := data.podWaitFor(defaultTimeout, hpPodName, serverNamespace, func(pod *corev1.Pod) (bool, error) { + hpPod, err := data.PodWaitFor(defaultTimeout, hpPodName, serverNamespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == corev1.PodRunning, nil }) if err != nil { @@ -327,7 +327,7 @@ func testOVSRestartSameNode(t *testing.T, data *TestData, namespace string) { // utility in busybox does not let us choose a smaller interval than 1 second. 
count := 25 cmd := fmt.Sprintf("arping -c %d %s", count, podIPs[1].ipv4.String()) - stdout, stderr, err := data.runCommandFromPod(namespace, podNames[0], busyboxContainerName, strings.Fields(cmd)) + stdout, stderr, err := data.RunCommandFromPod(namespace, podNames[0], busyboxContainerName, strings.Fields(cmd)) if err != nil { return fmt.Errorf("error when running arping command: %v - stdout: %s - stderr: %s", err, stdout, stderr) } @@ -399,7 +399,7 @@ func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { countFlows := func() int { cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when dumping flows: <%v>, err: <%v>", stderr, err) } @@ -409,7 +409,7 @@ func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { } countGroups := func() int { cmd := []string{"ovs-ofctl", "dump-groups", defaultBridgeName} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when dumping groups: <%v>, err: <%v>", stderr, err) } @@ -427,12 +427,12 @@ func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { if !testOptions.enableAntreaIPAM { delFlowsAndGroups := func() { cmd := []string{"ovs-ofctl", "del-flows", defaultBridgeName} - _, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + _, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when deleting flows: <%v>, err: <%v>", stderr, err) } cmd = []string{"ovs-ofctl", "del-groups", defaultBridgeName} - _, stderr, err = 
data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + _, stderr, err = data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err != nil { t.Fatalf("error when deleting groups: <%v>, err: <%v>", stderr, err) } @@ -443,7 +443,7 @@ func testOVSFlowReplay(t *testing.T, data *TestData, namespace string) { // run one command to delete flows and groups and to restart OVS to avoid connectivity issue restartCmd = []string{"bash", "-c", fmt.Sprintf("ovs-ofctl del-flows %s ; ovs-ofctl del-groups %s ; /usr/share/openvswitch/scripts/ovs-ctl --system-id=random restart --db-file=/var/run/openvswitch/conf.db", defaultBridgeName, defaultBridgeName)} } - if stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, restartCmd); err != nil { + if stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, restartCmd); err != nil { t.Fatalf("Error when restarting OVS with ovs-ctl: %v - stdout: %s - stderr: %s", err, stdout, stderr) } else { t.Logf("Restarted OVS with ovs-ctl: stdout: %s - stderr: %s", stdout, stderr) diff --git a/test/e2e/egress_test.go b/test/e2e/egress_test.go index 163bcb434ee..3873d2cbce1 100644 --- a/test/e2e/egress_test.go +++ b/test/e2e/egress_test.go @@ -169,7 +169,7 @@ ip netns exec %[1]s /agnhost netexec // getClientIP gets the translated client IP by accessing the API that replies the request's client IP. getClientIP := func(pod string) (string, string, error) { cmd := []string{"wget", "-T", "3", "-O", "-", fmt.Sprintf("%s:8080/clientip", serverIPStr)} - return data.runCommandFromPod(testNamespace, pod, busyboxContainerName, cmd) + return data.RunCommandFromPod(testNamespace, pod, busyboxContainerName, cmd) } // assertClientIP asserts the Pod is translated to the provided client IP. 
@@ -252,8 +252,8 @@ ip netns exec %[1]s /agnhost netexec }); err != nil { t.Fatalf("Failed to create Pod initial-ip-checker: %v", err) } - defer data.deletePod(testNamespace, initialIPChecker) - _, err = data.podWaitFor(timeout, initialIPChecker, testNamespace, func(pod *v1.Pod) (bool, error) { + defer data.DeletePod(testNamespace, initialIPChecker) + _, err = data.PodWaitFor(timeout, initialIPChecker, testNamespace, func(pod *v1.Pod) (bool, error) { if pod.Status.Phase == v1.PodFailed { return false, fmt.Errorf("Pod terminated with failure") } @@ -712,7 +712,7 @@ func hasIP(data *TestData, nodeName string, ip string) (bool, error) { return false, err } cmd := []string{"ip", "-br", "addr"} - stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, agentContainerName, cmd) if err != nil { return false, err } diff --git a/test/e2e/fixtures.go b/test/e2e/fixtures.go index 6bba915f4fb..0e003092d2c 100644 --- a/test/e2e/fixtures.go +++ b/test/e2e/fixtures.go @@ -92,11 +92,11 @@ func skipIfNotIPv6Cluster(tb testing.TB) { } } -func skipIfMissingKernelModule(tb testing.TB, nodeName string, requiredModules []string) { +func skipIfMissingKernelModule(tb testing.TB, data *TestData, nodeName string, requiredModules []string) { for _, module := range requiredModules { // modprobe with "--dry-run" does not require root privileges cmd := fmt.Sprintf("modprobe --dry-run %s", module) - rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(nodeName, cmd) if err != nil { tb.Skipf("Skipping test as modprobe could not be run to confirm the presence of module '%s': %v", module, err) } @@ -286,7 +286,7 @@ func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs // runKubectl runs the provided kubectl command on the control-plane Node and returns the // output. 
It returns an empty string in case of error. runKubectl := func(cmd string) string { - rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, _, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { tb.Errorf("Error when running this kubectl command on control-plane Node: %s", cmd) return "" @@ -358,7 +358,7 @@ func exportLogs(tb testing.TB, data *TestData, logsSubDir string, writeNodeLogs if clusterInfo.nodesOS[nodeName] == "windows" { cmd = "Get-EventLog -LogName \"System\" -Source \"Service Control Manager\" | grep kubelet ; Get-EventLog -LogName \"Application\" -Source \"nssm\" | grep kubelet" } - rc, stdout, _, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, _, err := data.RunCommandOnNode(nodeName, cmd) if err != nil || rc != 0 { // return an error and skip subsequent Nodes return fmt.Errorf("error when running journalctl on Node '%s', is it available? Error: %v", nodeName, err) @@ -383,7 +383,7 @@ func teardownFlowAggregator(tb testing.TB, data *TestData) { } } tb.Logf("Deleting '%s' K8s Namespace", flowAggregatorNamespace) - if err := data.deleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil { + if err := data.DeleteNamespace(flowAggregatorNamespace, defaultTimeout); err != nil { tb.Logf("Error when tearing down flow aggregator: %v", err) } } @@ -401,7 +401,7 @@ func teardownTest(tb testing.TB, data *TestData) { func deletePodWrapper(tb testing.TB, data *TestData, namespace, name string) { tb.Logf("Deleting Pod '%s'", name) - if err := data.deletePod(namespace, name); err != nil { + if err := data.DeletePod(namespace, name); err != nil { tb.Logf("Error when deleting Pod: %v", err) } } diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index ad2e6c413ce..07c888fbe12 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -560,7 +560,7 @@ func checkAntctlGetFlowRecordsJson(t *testing.T, data *TestData, podName string, dstIP 
= podBIPs.ipv6.String() cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d", dstIP, iperfTimeSecShort) } - stdout, _, err := data.runCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) + stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) require.NoErrorf(t, err, "Error when running iperf3 client: %v", err) _, srcPort, dstPort := getBandwidthAndPorts(stdout) @@ -619,7 +619,7 @@ func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP stri } else { cmdStr = fmt.Sprintf("iperf3 -6 -c %s -t %d -b %s", dstIP, iperfTimeSec, iperfBandwidth) } - stdout, _, err := data.runCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) + stdout, _, err := data.RunCommandFromPod(testNamespace, "perftest-a", "perftool", []string{"bash", "-c", cmdStr}) require.NoErrorf(t, err, "Error when running iperf3 client: %v", err) bwSlice, srcPort, _ := getBandwidthAndPorts(stdout) require.Equal(t, 2, len(bwSlice), "bandwidth value and / or bandwidth unit are not available") @@ -633,7 +633,7 @@ func checkRecordsForFlows(t *testing.T, data *TestData, srcIP string, dstIP stri t.Fatalf("Unit of the traffic bandwidth reported by iperf should be Mbits.") } - collectorOutput, recordSlices := getCollectorOutput(t, srcIP, dstIP, srcPort, checkService, true, isIPv6) + collectorOutput, recordSlices := getCollectorOutput(t, srcIP, dstIP, srcPort, checkService, true, isIPv6, data) // Iterate over recordSlices and build some results to test with expected results dataRecordsCount := 0 src, dst := matchSrcAndDstAddress(srcIP, dstIP, checkService, isIPv6) @@ -716,10 +716,10 @@ func checkRecordsForToExternalFlows(t *testing.T, data *TestData, srcNodeName st } else { cmd = fmt.Sprintf("wget -O- [%s]:%d", dstIP, dstPort) } - stdout, stderr, err := data.runCommandFromPod(testNamespace, srcPodName, busyboxContainerName, strings.Fields(cmd)) + stdout, stderr, err := 
data.RunCommandFromPod(testNamespace, srcPodName, busyboxContainerName, strings.Fields(cmd)) require.NoErrorf(t, err, "Error when running wget command, stdout: %s, stderr: %s", stdout, stderr) - _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, isIPv6) + _, recordSlices := getCollectorOutput(t, srcIP, dstIP, "", false, false, isIPv6, data) for _, record := range recordSlices { if strings.Contains(record, srcIP) && strings.Contains(record, dstIP) { checkPodAndNodeData(t, record, srcPodName, srcNodeName, "", "") @@ -742,13 +742,13 @@ func checkRecordsForDenyFlows(t *testing.T, data *TestData, testFlow1, testFlow2 cmdStr1 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow1.dstIP) cmdStr2 = fmt.Sprintf("iperf3 -6 -c %s -n 1", testFlow2.dstIP) } - _, _, err := data.runCommandFromPod(testNamespace, testFlow1.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr1}) + _, _, err := data.RunCommandFromPod(testNamespace, testFlow1.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr1}) assert.Error(t, err) - _, _, err = data.runCommandFromPod(testNamespace, testFlow2.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr2}) + _, _, err = data.RunCommandFromPod(testNamespace, testFlow2.srcPodName, "", []string{"timeout", "2", "bash", "-c", cmdStr2}) assert.Error(t, err) - _, recordSlices1 := getCollectorOutput(t, testFlow1.srcIP, testFlow1.dstIP, "", false, false, isIPv6) - _, recordSlices2 := getCollectorOutput(t, testFlow2.srcIP, testFlow2.dstIP, "", false, false, isIPv6) + _, recordSlices1 := getCollectorOutput(t, testFlow1.srcIP, testFlow1.dstIP, "", false, false, isIPv6, data) + _, recordSlices2 := getCollectorOutput(t, testFlow2.srcIP, testFlow2.dstIP, "", false, false, isIPv6, data) recordSlices := append(recordSlices1, recordSlices2...) 
src_flow1, dst_flow1 := matchSrcAndDstAddress(testFlow1.srcIP, testFlow1.dstIP, false, isIPv6) src_flow2, dst_flow2 := matchSrcAndDstAddress(testFlow2.srcIP, testFlow2.dstIP, false, isIPv6) @@ -853,7 +853,7 @@ func getUint64FieldFromRecord(t *testing.T, record string, field string) uint64 // received all the expected records for a given flow with source IP, destination IP // and source port. We send source port to ignore the control flows during the // iperf test. -func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService bool, checkAllRecords bool, isIPv6 bool) (string, []string) { +func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService bool, checkAllRecords bool, isIPv6 bool, data *TestData) (string, []string) { var collectorOutput string var recordSlices []string // In the ToExternalFlows test, flow record will arrive 5.5s (exporterActiveFlowExportTimeout+aggregatorActiveFlowRecordTimeout) after executing wget command @@ -862,7 +862,7 @@ func getCollectorOutput(t *testing.T, srcIP, dstIP, srcPort string, isDstService var rc int var err error // `pod-running-timeout` option is added to cover scenarios where ipfix flow-collector has crashed after being deployed - rc, collectorOutput, _, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl logs --pod-running-timeout=%v ipfix-collector -n antrea-test", aggregatorInactiveFlowRecordTimeout.String())) + rc, collectorOutput, _, err = data.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl logs --pod-running-timeout=%v ipfix-collector -n antrea-test", aggregatorInactiveFlowRecordTimeout.String())) if err != nil || rc != 0 { return false, err } @@ -1105,12 +1105,12 @@ func createPerftestServices(data *TestData, isIPv6 bool) (svcB *corev1.Service, svcIPFamily = corev1.IPv6Protocol } - svcB, err = data.createService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, 
corev1.ServiceTypeClusterIP, &svcIPFamily) + svcB, err = data.CreateService("perftest-b", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-b"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) if err != nil { return nil, nil, fmt.Errorf("Error when creating perftest-b Service: %v", err) } - svcC, err = data.createService("perftest-c", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-c"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) + svcC, err = data.CreateService("perftest-c", testNamespace, iperfPort, iperfPort, map[string]string{"antrea-e2e": "perftest-c"}, false, false, corev1.ServiceTypeClusterIP, &svcIPFamily) if err != nil { return nil, nil, fmt.Errorf("Error when creating perftest-c Service: %v", err) } @@ -1120,7 +1120,7 @@ func createPerftestServices(data *TestData, isIPv6 bool) (svcB *corev1.Service, func deletePerftestServices(t *testing.T, data *TestData) { for _, serviceName := range []string{"perftest-b", "perftest-c"} { - err := data.deleteService(serviceName) + err := data.DeleteService(testNamespace, serviceName) if err != nil { t.Logf("Error when deleting %s Service: %v", serviceName, err) } diff --git a/test/e2e/framework.go b/test/e2e/framework.go index ea688581311..ab9f4776981 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -185,8 +185,6 @@ type TestOptions struct { var testOptions TestOptions -var provider providers.ProviderInterface - // podInfo combines OS info with a Pod name. It is useful when choosing commands and options on Pods of different OS (Windows, Linux). type podInfo struct { name string @@ -197,6 +195,7 @@ type podInfo struct { // TestData stores the state required for each test case. 
type TestData struct { + provider providers.ProviderInterface kubeConfig *restclient.Config clientset kubernetes.Interface aggregatorClient aggregatorclientset.Interface @@ -384,30 +383,30 @@ func controlPlaneNoScheduleToleration() corev1.Toleration { } } -func initProvider() error { +func (data *TestData) InitProvider(providerName, providerConfigPath string) error { providerFactory := map[string]func(string) (providers.ProviderInterface, error){ "vagrant": providers.NewVagrantProvider, "kind": providers.NewKindProvider, "remote": providers.NewRemoteProvider, } - if fn, ok := providerFactory[testOptions.providerName]; ok { - newProvider, err := fn(testOptions.providerConfigPath) + if fn, ok := providerFactory[providerName]; ok { + newProvider, err := fn(providerConfigPath) if err != nil { return err } - provider = newProvider + data.provider = newProvider } else { - return fmt.Errorf("unknown provider '%s'", testOptions.providerName) + return fmt.Errorf("unknown provider '%s'", providerName) } return nil } // RunCommandOnNode is a convenience wrapper around the Provider interface RunCommandOnNode method. 
-func RunCommandOnNode(nodeName string, cmd string) (code int, stdout string, stderr string, err error) { - return provider.RunCommandOnNode(nodeName, cmd) +func (data *TestData) RunCommandOnNode(nodeName string, cmd string) (code int, stdout string, stderr string, err error) { + return data.provider.RunCommandOnNode(nodeName, cmd) } -func collectClusterInfo() error { +func (data *TestData) collectClusterInfo() error { // retrieve Node information nodes, err := testData.clientset.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) if err != nil { @@ -497,7 +496,7 @@ func collectClusterInfo() error { retrieveCIDRs := func(cmd string, reg string) ([]string, error) { res := make([]string, 2) - rc, stdout, _, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, _, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return res, fmt.Errorf("error when running the following command `%s` on control-plane Node: %v, %s", cmd, err, stdout) } @@ -570,8 +569,8 @@ func collectClusterInfo() error { return nil } -// createNamespace creates the provided namespace. -func (data *TestData) createNamespace(namespace string, mutateFunc func(*corev1.Namespace)) error { +// CreateNamespace creates the provided namespace. +func (data *TestData) CreateNamespace(namespace string, mutateFunc func(*corev1.Namespace)) error { ns := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, @@ -595,7 +594,7 @@ func (data *TestData) createNamespace(namespace string, mutateFunc func(*corev1. // createTestNamespace creates the namespace used for tests. func (data *TestData) createTestNamespace() error { - return data.createNamespace(testNamespace, nil) + return data.CreateNamespace(testNamespace, nil) } // createNamespaceWithAnnotations creates the namespace with Annotations. 
@@ -611,11 +610,11 @@ func (data *TestData) createNamespaceWithAnnotations(namespace string, annotatio } } } - return data.createNamespace(namespace, mutateFunc) + return data.CreateNamespace(namespace, mutateFunc) } -// deleteNamespace deletes the provided namespace and waits for deletion to actually complete. -func (data *TestData) deleteNamespace(namespace string, timeout time.Duration) error { +// DeleteNamespace deletes the provided namespace and waits for deletion to actually complete. +func (data *TestData) DeleteNamespace(namespace string, timeout time.Duration) error { var gracePeriodSeconds int64 var propagationPolicy = metav1.DeletePropagationForeground deleteOptions := metav1.DeleteOptions{ @@ -648,23 +647,23 @@ func (data *TestData) deleteNamespace(namespace string, timeout time.Duration) e // deleteTestNamespace deletes test namespace and waits for deletion to actually complete. func (data *TestData) deleteTestNamespace(timeout time.Duration) error { - return data.deleteNamespace(testNamespace, timeout) + return data.DeleteNamespace(testNamespace, timeout) } // deployAntreaCommon deploys Antrea using kubectl on the control-plane Node. func (data *TestData) deployAntreaCommon(yamlFile string, extraOptions string, waitForAgentRollout bool) error { // TODO: use the K8s apiserver when server side apply is available? 
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#server-side-apply - rc, _, _, err := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply %s -f %s", extraOptions, yamlFile)) + rc, _, _, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply %s -f %s", extraOptions, yamlFile)) if err != nil || rc != 0 { return fmt.Errorf("error when deploying Antrea; is %s available on the control-plane Node?", yamlFile) } - rc, stdout, stderr, err := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deploy/%s --timeout=%v", antreaNamespace, antreaDeployment, defaultTimeout)) + rc, stdout, stderr, err := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deploy/%s --timeout=%v", antreaNamespace, antreaDeployment, defaultTimeout)) if err != nil || rc != 0 { return fmt.Errorf("error when waiting for antrea-controller rollout to complete - rc: %v - stdout: %v - stderr: %v - err: %v", rc, stdout, stderr, err) } if waitForAgentRollout { - rc, stdout, stderr, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status ds/%s --timeout=%v", antreaNamespace, antreaDaemonSet, defaultTimeout)) + rc, stdout, stderr, err = data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status ds/%s --timeout=%v", antreaNamespace, antreaDaemonSet, defaultTimeout)) if err != nil || rc != 0 { return fmt.Errorf("error when waiting for antrea-agent rollout to complete - rc: %v - stdout: %v - stderr: %v - err: %v", rc, stdout, stderr, err) } @@ -702,7 +701,7 @@ func (data *TestData) deployFlowAggregator(ipfixCollector string) (string, error if testOptions.enableCoverage { flowAggYaml = flowAggregatorCovYML } - rc, _, _, err := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", flowAggYaml)) + rc, _, _, err := 
data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl apply -f %s", flowAggYaml)) if err != nil || rc != 0 { return "", fmt.Errorf("error when deploying the Flow Aggregator; %s not available on the control-plane Node", flowAggYaml) } @@ -713,9 +712,9 @@ func (data *TestData) deployFlowAggregator(ipfixCollector string) (string, error if err = data.mutateFlowAggregatorConfigMap(ipfixCollector, svc.Spec.ClusterIP); err != nil { return "", err } - if rc, _, _, err = provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deployment/%s --timeout=%v", flowAggregatorNamespace, flowAggregatorDeployment, 2*defaultTimeout)); err != nil || rc != 0 { - _, stdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) - _, logStdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s logs -l app=flow-aggregator", flowAggregatorNamespace)) + if rc, _, _, err = data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s rollout status deployment/%s --timeout=%v", flowAggregatorNamespace, flowAggregatorDeployment, 2*defaultTimeout)); err != nil || rc != 0 { + _, stdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) + _, logStdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s logs -l app=flow-aggregator", flowAggregatorNamespace)) return stdout, fmt.Errorf("error when waiting for the Flow Aggregator rollout to complete. 
kubectl describe output: %s, logs: %s", stdout, logStdout) } return svc.Spec.ClusterIP, nil @@ -853,7 +852,7 @@ func (data *TestData) waitForAntreaDaemonSetPods(timeout time.Duration) error { return true, nil }) if err == wait.ErrWaitTimeout { - _, stdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", antreaNamespace)) + _, stdout, _, _ := data.provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", antreaNamespace)) return fmt.Errorf("antrea-agent DaemonSet not ready within %v; kubectl describe pod output: %v", defaultTimeout, stdout) } else if err != nil { return err @@ -912,13 +911,8 @@ func (data *TestData) checkCoreDNSPods(timeout time.Duration) error { return data.restartCoreDNSPods(timeout) } -// createClient initializes the K8s clientset in the TestData structure. -func (data *TestData) createClient() error { - kubeconfigPath, err := provider.GetKubeconfigPath() - if err != nil { - return fmt.Errorf("error when getting Kubeconfig path: %v", err) - } - +// CreateClient initializes the K8s clientset in the TestData structure. +func (data *TestData) CreateClient(kubeconfigPath string) error { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() loadingRules.ExplicitPath = kubeconfigPath configOverrides := &clientcmd.ConfigOverrides{} @@ -1010,13 +1004,13 @@ func (data *TestData) createPodOnNode(name string, ns string, nodeName string, i // image could be a fully qualified URI which can't be used as container name and label value, // extract the image name from it. 
imageName := getImageName(image) - return data.createPodOnNodeInNamespace(name, ns, nodeName, imageName, image, command, args, env, ports, hostNetwork, mutateFunc) + return data.CreatePodOnNodeInNamespace(name, ns, nodeName, imageName, image, command, args, env, ports, hostNetwork, mutateFunc) } -// createPodOnNodeInNamespace creates a pod in the provided namespace with a container whose type is decided by imageName. +// CreatePodOnNodeInNamespace creates a pod in the provided namespace with a container whose type is decided by imageName. // Pod will be scheduled on the specified Node (if nodeName is not empty). // mutateFunc can be used to customize the Pod if the other parameters don't meet the requirements. -func (data *TestData) createPodOnNodeInNamespace(name, ns string, nodeName, ctrName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(*corev1.Pod)) error { +func (data *TestData) CreatePodOnNodeInNamespace(name, ns string, nodeName, ctrName string, image string, command []string, args []string, env []corev1.EnvVar, ports []corev1.ContainerPort, hostNetwork bool, mutateFunc func(*corev1.Pod)) error { podSpec := corev1.PodSpec{ Containers: []corev1.Container{ { @@ -1118,11 +1112,11 @@ func (data *TestData) createServerPodWithLabels(name, ns string, portNum int32, pod.Labels[k] = v } } - return data.createPodOnNodeInNamespace(name, ns, "", containerName, agnhostImage, cmd, nil, []corev1.EnvVar{env}, []corev1.ContainerPort{port}, false, mutateLabels) + return data.CreatePodOnNodeInNamespace(name, ns, "", containerName, agnhostImage, cmd, nil, []corev1.EnvVar{env}, []corev1.ContainerPort{port}, false, mutateLabels) } -// deletePod deletes a Pod in the test namespace. -func (data *TestData) deletePod(namespace, name string) error { +// DeletePod deletes a Pod in the test namespace. 
+func (data *TestData) DeletePod(namespace, name string) error { var gracePeriodSeconds int64 = 5 deleteOptions := metav1.DeleteOptions{ GracePeriodSeconds: &gracePeriodSeconds, @@ -1136,9 +1130,9 @@ func (data *TestData) deletePod(namespace, name string) error { } // Deletes a Pod in the test namespace then waits us to timeout for the Pod not to be visible to the -// client any more. +// client anymore. func (data *TestData) deletePodAndWait(timeout time.Duration, name string, ns string) error { - if err := data.deletePod(ns, name); err != nil { + if err := data.DeletePod(ns, name); err != nil { return err } err := wait.Poll(defaultInterval, timeout, func() (bool, error) { @@ -1152,16 +1146,16 @@ func (data *TestData) deletePodAndWait(timeout time.Duration, name string, ns st return false, nil }) if err == wait.ErrWaitTimeout { - return fmt.Errorf("Pod '%s' still visible to client after %v", name, timeout) + return fmt.Errorf("pod '%s' still visible to client after %v", name, timeout) } return err } type PodCondition func(*corev1.Pod) (bool, error) -// podWaitFor polls the K8s apiserver until the specified Pod is found (in the test Namespace) and +// PodWaitFor polls the K8s apiserver until the specified Pod is found (in the test Namespace) and // the condition predicate is met (or until the provided timeout expires). -func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) { +func (data *TestData) PodWaitFor(timeout time.Duration, name, namespace string, condition PodCondition) (*corev1.Pod, error) { var pod *corev1.Pod err := wait.Poll(defaultInterval, timeout, func() (bool, error) { var err error @@ -1186,7 +1180,7 @@ func (data *TestData) podWaitFor(timeout time.Duration, name, namespace string, // podWaitForRunning polls the k8s apiserver until the specified Pod is in the "running" state (or // until the provided timeout expires). 
func (data *TestData) podWaitForRunning(timeout time.Duration, name, namespace string) error { - _, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { + _, err := data.PodWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == corev1.PodRunning, nil }) return err @@ -1197,7 +1191,7 @@ func (data *TestData) podWaitForRunning(timeout time.Duration, name, namespace s // Pod is not using "hostNetwork", the function also checks that an IP address exists in each required // Address Family in the cluster. func (data *TestData) podWaitForIPs(timeout time.Duration, name, namespace string) (*PodIPs, error) { - pod, err := data.podWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { + pod, err := data.PodWaitFor(timeout, name, namespace, func(pod *corev1.Pod) (bool, error) { return pod.Status.Phase == corev1.PodRunning, nil }) if err != nil { @@ -1457,15 +1451,15 @@ func validatePodIP(podNetworkCIDR string, ip net.IP) (bool, error) { return cidr.Contains(ip), nil } -// createService creates a service with port and targetPort. -func (data *TestData) createService(serviceName, namespace string, port, targetPort int32, selector map[string]string, affinity, nodeLocalExternal bool, +// CreateService creates a service with port and targetPort. 
+func (data *TestData) CreateService(serviceName, namespace string, port, targetPort int32, selector map[string]string, affinity, nodeLocalExternal bool, serviceType corev1.ServiceType, ipFamily *corev1.IPFamily) (*corev1.Service, error) { annotation := make(map[string]string) - return data.createServiceWithAnnotations(serviceName, namespace, port, targetPort, corev1.ProtocolTCP, selector, affinity, nodeLocalExternal, serviceType, ipFamily, annotation) + return data.CreateServiceWithAnnotations(serviceName, namespace, port, targetPort, corev1.ProtocolTCP, selector, affinity, nodeLocalExternal, serviceType, ipFamily, annotation) } -// createService creates a service with Annotation -func (data *TestData) createServiceWithAnnotations(serviceName, namespace string, port, targetPort int32, protocol corev1.Protocol, selector map[string]string, affinity, nodeLocalExternal bool, +// CreateServiceWithAnnotations creates a service with Annotation +func (data *TestData) CreateServiceWithAnnotations(serviceName, namespace string, port, targetPort int32, protocol corev1.Protocol, selector map[string]string, affinity, nodeLocalExternal bool, serviceType corev1.ServiceType, ipFamily *corev1.IPFamily, annotations map[string]string) (*corev1.Service, error) { affinityType := corev1.ServiceAffinityNone var ipFamilies []corev1.IPFamily @@ -1505,7 +1499,7 @@ func (data *TestData) createServiceWithAnnotations(serviceName, namespace string // createNginxClusterIPServiceWithAnnotations creates nginx service with Annotation func (data *TestData) createNginxClusterIPServiceWithAnnotations(affinity bool, ipFamily *corev1.IPFamily, annotation map[string]string) (*corev1.Service, error) { - return data.createServiceWithAnnotations("nginx", testNamespace, 80, 80, corev1.ProtocolTCP, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily, annotation) + return data.CreateServiceWithAnnotations("nginx", testNamespace, 80, 80, corev1.ProtocolTCP, 
map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily, annotation) } // createNginxClusterIPService creates a nginx service with the given name. @@ -1513,17 +1507,17 @@ func (data *TestData) createNginxClusterIPService(name, namespace string, affini if name == "" { name = "nginx" } - return data.createService(name, namespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) + return data.CreateService(name, namespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeClusterIP, ipFamily) } // createAgnhostNodePortService creates a NodePort agnhost service with the given name. func (data *TestData) createAgnhostNodePortService(serviceName string, affinity, nodeLocalExternal bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - return data.createService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) + return data.CreateService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) } // createNginxNodePortService creates a NodePort nginx service with the given name. 
func (data *TestData) createNginxNodePortService(serviceName string, affinity, nodeLocalExternal bool, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - return data.createService(serviceName, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) + return data.CreateService(serviceName, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, nodeLocalExternal, corev1.ServiceTypeNodePort, ipFamily) } func (data *TestData) updateServiceExternalTrafficPolicy(serviceName string, nodeLocalExternal bool) (*corev1.Service, error) { @@ -1542,7 +1536,7 @@ func (data *TestData) updateServiceExternalTrafficPolicy(serviceName string, nod // createAgnhostLoadBalancerService creates a LoadBalancer agnhost service with the given name. func (data *TestData) createAgnhostLoadBalancerService(serviceName string, affinity, nodeLocalExternal bool, ingressIPs []string, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - svc, err := data.createService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeLoadBalancer, ipFamily) + svc, err := data.CreateService(serviceName, testNamespace, 8080, 8080, map[string]string{"app": "agnhost"}, affinity, nodeLocalExternal, corev1.ServiceTypeLoadBalancer, ipFamily) if err != nil { return svc, err } @@ -1561,7 +1555,7 @@ func (data *TestData) createAgnhostLoadBalancerService(serviceName string, affin } func (data *TestData) createNginxLoadBalancerService(affinity bool, ingressIPs []string, ipFamily *corev1.IPFamily) (*corev1.Service, error) { - svc, err := data.createService(nginxLBService, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeLoadBalancer, ipFamily) + svc, err := data.CreateService(nginxLBService, testNamespace, 80, 80, map[string]string{"app": "nginx"}, affinity, false, corev1.ServiceTypeLoadBalancer, ipFamily) if err != nil { 
return svc, err } @@ -1578,21 +1572,12 @@ func (data *TestData) createNginxLoadBalancerService(affinity bool, ingressIPs [ return data.clientset.CoreV1().Services(svc.Namespace).Patch(context.TODO(), svc.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "status") } -// deleteService deletes the service. -func (data *TestData) deleteService(name string) error { - if err := data.clientset.CoreV1().Services(testNamespace).Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { - return fmt.Errorf("unable to cleanup service %v: %v", name, err) - } - return nil -} - // Deletes a Service in the test namespace then waits us to timeout for the Service not to be visible to the -// client any more. +// client anymore. func (data *TestData) deleteServiceAndWait(timeout time.Duration, name string) error { - if err := data.deleteService(name); err != nil { + if err := data.DeleteService(testNamespace, name); err != nil { return err } - err := wait.Poll(defaultInterval, timeout, func() (bool, error) { if _, err := data.clientset.CoreV1().Services(testNamespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil { if errors.IsNotFound(err) { @@ -1651,7 +1636,7 @@ func randName(prefix string) string { // Run the provided command in the specified Container for the give Pod and returns the contents of // stdout and stderr as strings. An error either indicates that the command couldn't be run or that // the command returned a non-zero error code. -func (data *TestData) runCommandFromPod(podNamespace string, podName string, containerName string, cmd []string) (stdout string, stderr string, err error) { +func (data *TestData) RunCommandFromPod(podNamespace string, podName string, containerName string, cmd []string) (stdout string, stderr string, err error) { request := data.clientset.CoreV1().RESTClient().Post(). Namespace(podNamespace). Resource("pods"). 
@@ -1749,13 +1734,13 @@ func (data *TestData) runPingCommandFromTestPod(podInfo podInfo, ns string, targ } if targetPodIPs.ipv4 != nil { cmdV4 := append(cmd, "-4", targetPodIPs.ipv4.String()) - if stdout, stderr, err := data.runCommandFromPod(ns, podInfo.name, ctrName, cmdV4); err != nil { + if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.name, ctrName, cmdV4); err != nil { return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV4, " "), err, stdout, stderr) } } if targetPodIPs.ipv6 != nil { cmdV6 := append(cmd, "-6", targetPodIPs.ipv6.String()) - if stdout, stderr, err := data.runCommandFromPod(ns, podInfo.name, ctrName, cmdV6); err != nil { + if stdout, stderr, err := data.RunCommandFromPod(ns, podInfo.name, ctrName, cmdV6); err != nil { return fmt.Errorf("error when running ping command '%s': %v - stdout: %s - stderr: %s", strings.Join(cmdV6, " "), err, stdout, stderr) } } @@ -1780,7 +1765,7 @@ func (data *TestData) runNetcatCommandFromTestPodWithProtocol(podName string, ns protocolOption, server, port), } - stdout, stderr, err := data.runCommandFromPod(ns, podName, busyboxContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(ns, podName, busyboxContainerName, cmd) if err == nil { return nil } @@ -1789,7 +1774,7 @@ func (data *TestData) runNetcatCommandFromTestPodWithProtocol(podName string, ns func (data *TestData) doesOVSPortExist(antreaPodName string, portName string) (bool, error) { cmd := []string{"ovs-vsctl", "port-to-br", portName} - _, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) + _, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, ovsContainerName, cmd) if err == nil { return true, nil } else if strings.Contains(stderr, "no port named") { @@ -1800,7 +1785,7 @@ func (data *TestData) doesOVSPortExist(antreaPodName string, portName string) (b func (data *TestData) doesOVSPortExistOnWindows(nodeName, portName 
string) (bool, error) { cmd := fmt.Sprintf("ovs-vsctl port-to-br %s", portName) - _, _, stderr, err := RunCommandOnNode(nodeName, cmd) + _, _, stderr, err := data.RunCommandOnNode(nodeName, cmd) if strings.Contains(stderr, "no port named") { return false, nil } else if err == nil { @@ -1921,8 +1906,8 @@ func (data *TestData) GetMulticastInterfaces(antreaNamespace string) ([]string, return agentConf.MulticastInterfaces, nil } -func GetTransportInterface() (string, error) { - _, transportInterfaceUntrimmed, _, err := RunCommandOnNode(nodeName(0), fmt.Sprintf("ip -br addr show | grep %s | awk '{print $1}'", clusterInfo.nodes[0].ipv4Addr)) +func GetTransportInterface(data *TestData) (string, error) { + _, transportInterfaceUntrimmed, _, err := data.RunCommandOnNode(nodeName(0), fmt.Sprintf("ip -br addr show | grep %s | awk '{print $1}'", clusterInfo.nodes[0].ipv4Addr)) if err != nil { return "", err } @@ -2053,13 +2038,13 @@ func (data *TestData) gracefulExitAntreaController(covDir string) error { } cmds := []string{"pgrep", "-f", antreaControllerCovBinary, "-P", "1"} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) if err != nil { return fmt.Errorf("error when getting pid of '%s', stderr: <%v>, err: <%v>", antreaControllerCovBinary, stderr, err) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} - _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) + _, stderr, err = data.RunCommandFromPod(antreaNamespace, podName, "antrea-controller", cmds) if err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", antreaControllerCovBinary, stderr, err) } @@ -2094,12 +2079,12 @@ func (data *TestData) gracefulExitAntreaAgent(covDir string, nodeName string) er } cmds := []string{"pgrep", "-f", antreaAgentCovBinary, "-P", "1"} - stdout, 
stderr, err := data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) if err != nil { return fmt.Errorf("error when getting pid of '%s', stderr: <%v>, err: <%v>", antreaAgentCovBinary, stderr, err) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} - _, stderr, err = data.runCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) + _, stderr, err = data.RunCommandFromPod(antreaNamespace, podName, "antrea-agent", cmds) if err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", antreaAgentCovBinary, stderr, err) } @@ -2121,13 +2106,13 @@ func (data *TestData) gracefulExitFlowAggregator(covDir string) error { podName := flowAggPod.Name cmds := []string{"pgrep", "-f", flowAggregatorCovBinary, "-P", "1"} - stdout, stderr, err := data.runCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds) + stdout, stderr, err := data.RunCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds) if err != nil { - _, describeStdout, _, _ := provider.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) + _, describeStdout, _, _ := data.RunCommandOnNode(controlPlaneNodeName(), fmt.Sprintf("kubectl -n %s describe pod", flowAggregatorNamespace)) return fmt.Errorf("error when getting pid of '%s', stdout: <%v>, stderr: <%v>, err: <%v>, describe stdout: <%v>", flowAggregatorCovBinary, stdout, stderr, err, describeStdout) } cmds = []string{"kill", "-SIGINT", strings.TrimSpace(stdout)} - if _, stderr, err = data.runCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds); err != nil { + if _, stderr, err = data.RunCommandFromPod(flowAggregatorNamespace, podName, "flow-aggregator", cmds); err != nil { return fmt.Errorf("error when sending SIGINT signal to '%s', stderr: <%v>, err: <%v>", flowAggregatorCovBinary, 
stderr, err) } if err = data.copyPodFiles(podName, "flow-aggregator", flowAggregatorNamespace, flowAggregatorCovFile, covDir); err != nil { @@ -2141,7 +2126,7 @@ func (data *TestData) gracefulExitFlowAggregator(covDir string) error { func (data *TestData) collectAntctlCovFiles(podName string, containerName string, nsName string, covDir string) error { // copy antctl coverage files from Pod to the coverage directory cmds := []string{"bash", "-c", "find . -maxdepth 1 -name 'antctl*.out' -exec basename {} ';'"} - stdout, stderr, err := data.runCommandFromPod(nsName, podName, containerName, cmds) + stdout, stderr, err := data.RunCommandFromPod(nsName, podName, containerName, cmds) if err != nil { return fmt.Errorf("error when running this find command '%s' on Pod '%s', stderr: <%v>, err: <%v>", cmds, podName, stderr, err) } @@ -2168,7 +2153,7 @@ func (data *TestData) collectAntctlCovFilesFromControlPlaneNode(covDir string) e } else { cmd = "find . -maxdepth 1 -name 'antctl*.out' -exec basename {} ';'" } - rc, stdout, stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return fmt.Errorf("error when running this find command '%s' on control-plane Node '%s', stderr: <%v>, err: <%v>", cmd, controlPlaneNodeName(), stderr, err) @@ -2212,7 +2197,7 @@ func (data *TestData) copyPodFiles(podName string, containerName string, nsName } defer w.Close() cmd := []string{"cat", fileName} - stdout, stderr, err := data.runCommandFromPod(nsName, podName, containerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(nsName, podName, containerName, cmd) if err != nil { return fmt.Errorf("cannot retrieve content of file '%s' from Pod '%s', stderr: <%v>, err: <%v>", fileName, podName, stderr, err) } @@ -2247,7 +2232,7 @@ func (data *TestData) copyNodeFiles(nodeName string, fileName string, covDir str } defer w.Close() cmd := fmt.Sprintf("cat %s", fileName) - rc, stdout, 
stderr, err := RunCommandOnNode(controlPlaneNodeName(), cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(controlPlaneNodeName(), cmd) if err != nil || rc != 0 { return fmt.Errorf("cannot retrieve content of file '%s' from Node '%s', stderr: <%v>, err: <%v>", fileName, controlPlaneNodeName(), stderr, err) } diff --git a/test/e2e/ipsec_test.go b/test/e2e/ipsec_test.go index 741af80004a..e7d2235e5e0 100644 --- a/test/e2e/ipsec_test.go +++ b/test/e2e/ipsec_test.go @@ -50,7 +50,7 @@ func (data *TestData) readSecurityAssociationsStatus(nodeName string) (up int, c return 0, 0, err } cmd := []string{"ipsec", "status"} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, "antrea-ipsec", cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, "antrea-ipsec", cmd) if err != nil { return 0, 0, fmt.Errorf("error when running 'ipsec status' on '%s': %v - stdout: %s - stderr: %s", nodeName, err, stdout, stderr) } diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index 089183167dc..50fb6d56571 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -106,7 +106,7 @@ func (k *KubernetesUtils) probe( fmt.Sprintf("for i in $(seq 1 3); do /agnhost connect %s:%d --timeout=1s --protocol=%s; done;", dstAddr, port, protocolStr[protocol]), } log.Tracef("Running: kubectl exec %s -c %s -n %s -- %s", pod.Name, containerName, pod.Namespace, strings.Join(cmd, " ")) - stdout, stderr, err := k.runCommandFromPod(pod.Namespace, pod.Name, containerName, cmd) + stdout, stderr, err := k.RunCommandFromPod(pod.Namespace, pod.Name, containerName, cmd) // It needs to check both err and stderr because: // 1. The probe tried 3 times. If it checks err only, failure+failure+success would be considered connected. // 2. There might be an issue in Pod exec API that it sometimes doesn't return error when the probe fails. See #2394. 
@@ -118,13 +118,13 @@ func (k *KubernetesUtils) probe( if stderr == "" { return Error } - return decideProbeResult(stderr, 3) + return DecideProbeResult(stderr, 3) } return Connected } -// decideProbeResult uses the probe stderr to decide the connectivity. -func decideProbeResult(stderr string, probeNum int) PodConnectivityMark { +// DecideProbeResult uses the probe stderr to decide the connectivity. +func DecideProbeResult(stderr string, probeNum int) PodConnectivityMark { countConnected := probeNum - strings.Count(stderr, "\n") countDropped := strings.Count(stderr, "TIMEOUT") // For our UDP rejection cases, agnhost will return: @@ -211,21 +211,21 @@ func (k *KubernetesUtils) ProbeAddr(ns, podLabelKey, podLabelValue, dstAddr stri } // CreateOrUpdateNamespace is a convenience function for idempotent setup of Namespaces -func (k *KubernetesUtils) CreateOrUpdateNamespace(n string, labels map[string]string) (*v1.Namespace, error) { +func (data *TestData) CreateOrUpdateNamespace(n string, labels map[string]string) (*v1.Namespace, error) { ns := &v1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: n, Labels: labels, }, } - nsr, err := k.clientset.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + nsr, err := data.clientset.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) if err == nil { log.Infof("Created Namespace %s", n) return nsr, nil } log.Debugf("Unable to create Namespace %s, let's try updating it instead (error: %s)", ns.Name, err) - nsr, err = k.clientset.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + nsr, err = data.clientset.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) if err != nil { log.Debugf("Unable to update Namespace %s: %s", ns, err) } @@ -234,7 +234,7 @@ func (k *KubernetesUtils) CreateOrUpdateNamespace(n string, labels map[string]st } // CreateOrUpdateDeployment is a convenience function for idempotent setup of deployments -func (k *KubernetesUtils) 
CreateOrUpdateDeployment(ns, deploymentName string, replicas int32, labels map[string]string) (*appsv1.Deployment, error) { +func (data *TestData) CreateOrUpdateDeployment(ns, deploymentName string, replicas int32, labels map[string]string) (*appsv1.Deployment, error) { zero := int64(0) log.Infof("Creating/updating Deployment '%s/%s'", ns, deploymentName) makeContainerSpec := func(port int32, protocol v1.Protocol) v1.Container { @@ -298,14 +298,14 @@ func (k *KubernetesUtils) CreateOrUpdateDeployment(ns, deploymentName string, re }, } - d, err := k.clientset.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) + d, err := data.clientset.AppsV1().Deployments(ns).Create(context.TODO(), deployment, metav1.CreateOptions{}) if err == nil { log.Infof("Created deployment '%s/%s'", ns, d.Name) return d, nil } log.Debugf("Unable to create deployment %s in Namespace %s, let's try update instead", deployment.Name, ns) - d, err = k.clientset.AppsV1().Deployments(ns).Update(context.TODO(), deployment, metav1.UpdateOptions{}) + d, err = data.clientset.AppsV1().Deployments(ns).Update(context.TODO(), deployment, metav1.UpdateOptions{}) if err != nil { log.Debugf("Unable to update deployment '%s/%s': %s", ns, deployment.Name, err) } @@ -313,7 +313,7 @@ func (k *KubernetesUtils) CreateOrUpdateDeployment(ns, deploymentName string, re } // BuildService is a convenience function for building a corev1.Service spec. 
-func (k *KubernetesUtils) BuildService(svcName, svcNS string, port, targetPort int, selector map[string]string, serviceType *v1.ServiceType) *v1.Service { +func (data *TestData) BuildService(svcName, svcNS string, port, targetPort int, selector map[string]string, serviceType *v1.ServiceType) *v1.Service { service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: svcName, @@ -334,12 +334,12 @@ func (k *KubernetesUtils) BuildService(svcName, svcNS string, port, targetPort i } // CreateOrUpdateService is a convenience function for updating/creating Services. -func (k *KubernetesUtils) CreateOrUpdateService(svc *v1.Service) (*v1.Service, error) { +func (data *TestData) CreateOrUpdateService(svc *v1.Service) (*v1.Service, error) { log.Infof("Creating/updating Service %s in ns %s", svc.Name, svc.Namespace) - svcReturned, err := k.clientset.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) + svcReturned, err := data.clientset.CoreV1().Services(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{}) if err != nil { - service, err := k.clientset.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + service, err := data.clientset.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) if err != nil { log.Infof("Unable to create Service %s/%s: %s", svc.Namespace, svc.Name, err) return nil, err @@ -350,25 +350,21 @@ func (k *KubernetesUtils) CreateOrUpdateService(svc *v1.Service) (*v1.Service, e clusterIP := svcReturned.Spec.ClusterIP svcReturned.Spec = svc.Spec svcReturned.Spec.ClusterIP = clusterIP - service, err := k.clientset.CoreV1().Services(svc.Namespace).Update(context.TODO(), svcReturned, metav1.UpdateOptions{}) + service, err := data.clientset.CoreV1().Services(svc.Namespace).Update(context.TODO(), svcReturned, metav1.UpdateOptions{}) return service, err } return nil, fmt.Errorf("error occurred in creating/updating Service %s", svc.Name) } // GetService is a 
convenience function for getting Service -func (k *KubernetesUtils) GetService(namespace, name string) (*v1.Service, error) { - res, err := k.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return res, nil +func (data *TestData) GetService(namespace, name string) (*v1.Service, error) { + return data.clientset.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } // DeleteService is a convenience function for deleting a Service by Namespace and name. -func (k *KubernetesUtils) DeleteService(ns, name string) error { +func (data *TestData) DeleteService(ns, name string) error { log.Infof("Deleting Service %s in ns %s", name, ns) - err := k.clientset.CoreV1().Services(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.clientset.CoreV1().Services(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete Service %s", name) } @@ -376,14 +372,14 @@ func (k *KubernetesUtils) DeleteService(ns, name string) error { } // CleanServices is a convenience function for deleting Services in the cluster. -func (k *KubernetesUtils) CleanServices(namespaces []string) error { +func (data *TestData) CleanServices(namespaces []string) error { for _, ns := range namespaces { - l, err := k.clientset.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) + l, err := data.clientset.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list Services in ns %s", ns) } for _, svc := range l.Items { - if err := k.DeleteService(svc.Namespace, svc.Name); err != nil { + if err := data.DeleteService(svc.Namespace, svc.Name); err != nil { return err } } @@ -392,7 +388,7 @@ func (k *KubernetesUtils) CleanServices(namespaces []string) error { } // BuildServiceAccount is a convenience function for building a corev1.SerivceAccount spec. 
-func (k *KubernetesUtils) BuildServiceAccount(name, ns string, labels map[string]string) *v1.ServiceAccount { +func (data *TestData) BuildServiceAccount(name, ns string, labels map[string]string) *v1.ServiceAccount { serviceAccount := &v1.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -404,13 +400,12 @@ func (k *KubernetesUtils) BuildServiceAccount(name, ns string, labels map[string } // CreateOrUpdateServiceAccount is a convenience function for updating/creating ServiceAccount. -func (k *KubernetesUtils) CreateOrUpdateServiceAccount(sa *v1.ServiceAccount) (*v1.ServiceAccount, error) { - +func (data *TestData) CreateOrUpdateServiceAccount(sa *v1.ServiceAccount) (*v1.ServiceAccount, error) { log.Infof("Creating/updating ServiceAccount %s in ns %s", sa.Name, sa.Namespace) - saReturned, err := k.clientset.CoreV1().ServiceAccounts(sa.Namespace).Get(context.TODO(), sa.Name, metav1.GetOptions{}) + saReturned, err := data.clientset.CoreV1().ServiceAccounts(sa.Namespace).Get(context.TODO(), sa.Name, metav1.GetOptions{}) if err != nil { - serviceAccount, err := k.clientset.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) + serviceAccount, err := data.clientset.CoreV1().ServiceAccounts(sa.Namespace).Create(context.TODO(), sa, metav1.CreateOptions{}) if err != nil { log.Infof("Unable to create ServiceAccount %s/%s: %s", sa.Namespace, sa.Name, err) return nil, err @@ -419,7 +414,7 @@ func (k *KubernetesUtils) CreateOrUpdateServiceAccount(sa *v1.ServiceAccount) (* } log.Debugf("ServiceAccount %s/%s already exists, updating", sa.Namespace, sa.Name) saReturned.Labels = sa.Labels - serviceAccount, err := k.clientset.CoreV1().ServiceAccounts(sa.Namespace).Update(context.TODO(), saReturned, metav1.UpdateOptions{}) + serviceAccount, err := data.clientset.CoreV1().ServiceAccounts(sa.Namespace).Update(context.TODO(), saReturned, metav1.UpdateOptions{}) if err != nil { log.Infof("Unable to update ServiceAccount %s/%s: %s", 
sa.Namespace, sa.Name, err) return nil, err @@ -428,9 +423,9 @@ func (k *KubernetesUtils) CreateOrUpdateServiceAccount(sa *v1.ServiceAccount) (* } // DeleteServiceAccount is a convenience function for deleting a ServiceAccount by Namespace and name. -func (k *KubernetesUtils) DeleteServiceAccount(ns, name string) error { +func (data *TestData) DeleteServiceAccount(ns, name string) error { log.Infof("Deleting ServiceAccount %s in ns %s", name, ns) - err := k.clientset.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.clientset.CoreV1().ServiceAccounts(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete ServiceAccount %s in ns %s", name, ns) } @@ -439,15 +434,15 @@ func (k *KubernetesUtils) DeleteServiceAccount(ns, name string) error { // CreateOrUpdateNetworkPolicy is a convenience function for updating/creating netpols. Updating is important since // some tests update a network policy to confirm that mutation works with a CNI. 
-func (k *KubernetesUtils) CreateOrUpdateNetworkPolicy(netpol *v1net.NetworkPolicy) (*v1net.NetworkPolicy, error) { +func (data *TestData) CreateOrUpdateNetworkPolicy(netpol *v1net.NetworkPolicy) (*v1net.NetworkPolicy, error) { log.Infof("Creating/updating NetworkPolicy '%s/%s'", netpol.Namespace, netpol.Name) - np, err := k.clientset.NetworkingV1().NetworkPolicies(netpol.Namespace).Update(context.TODO(), netpol, metav1.UpdateOptions{}) + np, err := data.clientset.NetworkingV1().NetworkPolicies(netpol.Namespace).Update(context.TODO(), netpol, metav1.UpdateOptions{}) if err == nil { return np, err } log.Debugf("Unable to update NetworkPolicy '%s/%s', let's try creating it instead (error: %s)", netpol.Namespace, netpol.Name, err) - np, err = k.clientset.NetworkingV1().NetworkPolicies(netpol.Namespace).Create(context.TODO(), netpol, metav1.CreateOptions{}) + np, err = data.clientset.NetworkingV1().NetworkPolicies(netpol.Namespace).Create(context.TODO(), netpol, metav1.CreateOptions{}) if err != nil { log.Debugf("Unable to create network policy: %s", err) } @@ -455,18 +450,14 @@ func (k *KubernetesUtils) CreateOrUpdateNetworkPolicy(netpol *v1net.NetworkPolic } // GetNetworkPolicy is a convenience function for getting k8s NetworkPolicies. -func (k *KubernetesUtils) GetNetworkPolicy(namespace, name string) (*v1net.NetworkPolicy, error) { - res, err := k.clientset.NetworkingV1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return res, nil +func (data *TestData) GetNetworkPolicy(namespace, name string) (*v1net.NetworkPolicy, error) { + return data.clientset.NetworkingV1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } // DeleteNetworkPolicy is a convenience function for deleting NetworkPolicy by name and Namespace. 
-func (k *KubernetesUtils) DeleteNetworkPolicy(ns, name string) error { +func (data *TestData) DeleteNetworkPolicy(ns, name string) error { log.Infof("Deleting NetworkPolicy '%s/%s'", ns, name) - err := k.clientset.NetworkingV1().NetworkPolicies(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.clientset.NetworkingV1().NetworkPolicies(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete NetworkPolicy '%s'", name) } @@ -474,14 +465,14 @@ func (k *KubernetesUtils) DeleteNetworkPolicy(ns, name string) error { } // CleanNetworkPolicies is a convenience function for deleting NetworkPolicies in the provided namespaces. -func (k *KubernetesUtils) CleanNetworkPolicies(namespaces []string) error { +func (data *TestData) CleanNetworkPolicies(namespaces []string) error { for _, ns := range namespaces { - l, err := k.clientset.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) + l, err := data.clientset.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list NetworkPolicy in Namespace '%s'", ns) } for _, np := range l.Items { - if err = k.DeleteNetworkPolicy(np.Namespace, np.Name); err != nil { + if err = data.DeleteNetworkPolicy(np.Namespace, np.Name); err != nil { return err } } @@ -490,15 +481,15 @@ func (k *KubernetesUtils) CleanNetworkPolicies(namespaces []string) error { } // CreateTier is a convenience function for creating an Antrea Policy Tier by name and priority. 
-func (k *KubernetesUtils) CreateNewTier(name string, tierPriority int32) (*crdv1alpha1.Tier, error) { +func (data *TestData) CreateNewTier(name string, tierPriority int32) (*crdv1alpha1.Tier, error) { log.Infof("Creating tier %s", name) - _, err := k.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) + _, err := data.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { tr := &crdv1alpha1.Tier{ ObjectMeta: metav1.ObjectMeta{Name: name}, Spec: crdv1alpha1.TierSpec{Priority: tierPriority}, } - tr, err = k.crdClient.CrdV1alpha1().Tiers().Create(context.TODO(), tr, metav1.CreateOptions{}) + tr, err = data.crdClient.CrdV1alpha1().Tiers().Create(context.TODO(), tr, metav1.CreateOptions{}) if err != nil { log.Debugf("Unable to create tier %s: %s", name, err) } @@ -508,40 +499,36 @@ func (k *KubernetesUtils) CreateNewTier(name string, tierPriority int32) (*crdv1 } // GetTier is a convenience function for getting Tier. -func (k *KubernetesUtils) GetTier(name string) (*crdv1alpha1.Tier, error) { - res, err := k.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return res, nil +func (data *TestData) GetTier(name string) (*crdv1alpha1.Tier, error) { + return data.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) } // UpdateTier is a convenience function for updating an Antrea Policy Tier. 
-func (k *KubernetesUtils) UpdateTier(tier *crdv1alpha1.Tier) (*crdv1alpha1.Tier, error) { +func (data *TestData) UpdateTier(tier *crdv1alpha1.Tier) (*crdv1alpha1.Tier, error) { log.Infof("Updating tier %s", tier.Name) - updatedTier, err := k.crdClient.CrdV1alpha1().Tiers().Update(context.TODO(), tier, metav1.UpdateOptions{}) + updatedTier, err := data.crdClient.CrdV1alpha1().Tiers().Update(context.TODO(), tier, metav1.UpdateOptions{}) return updatedTier, err } // DeleteTier is a convenience function for deleting an Antrea Policy Tier with specific name. -func (k *KubernetesUtils) DeleteTier(name string) error { - _, err := k.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) +func (data *TestData) DeleteTier(name string) error { + _, err := data.crdClient.CrdV1alpha1().Tiers().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return errors.Wrapf(err, "unable to get tier %s", name) } log.Infof("Deleting tier %s", name) - if err = k.crdClient.CrdV1alpha1().Tiers().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { + if err = data.crdClient.CrdV1alpha1().Tiers().Delete(context.TODO(), name, metav1.DeleteOptions{}); err != nil { return errors.Wrapf(err, "unable to delete tier %s", name) } return nil } // CreateOrUpdateV1Alpha2CG is a convenience function for idempotent setup of crd/v1alpha2 ClusterGroups -func (k *KubernetesUtils) CreateOrUpdateV1Alpha2CG(cg *crdv1alpha2.ClusterGroup) (*crdv1alpha2.ClusterGroup, error) { +func (data *TestData) CreateOrUpdateV1Alpha2CG(cg *crdv1alpha2.ClusterGroup) (*crdv1alpha2.ClusterGroup, error) { log.Infof("Creating/updating ClusterGroup %s", cg.Name) - cgReturned, err := k.crdClient.CrdV1alpha2().ClusterGroups().Get(context.TODO(), cg.Name, metav1.GetOptions{}) + cgReturned, err := data.crdClient.CrdV1alpha2().ClusterGroups().Get(context.TODO(), cg.Name, metav1.GetOptions{}) if err != nil { - cgr, err := k.crdClient.CrdV1alpha2().ClusterGroups().Create(context.TODO(), cg, 
metav1.CreateOptions{}) + cgr, err := data.crdClient.CrdV1alpha2().ClusterGroups().Create(context.TODO(), cg, metav1.CreateOptions{}) if err != nil { log.Infof("Unable to create cluster group %s: %v", cg.Name, err) return nil, err @@ -550,18 +537,18 @@ func (k *KubernetesUtils) CreateOrUpdateV1Alpha2CG(cg *crdv1alpha2.ClusterGroup) } else if cgReturned.Name != "" { log.Debugf("ClusterGroup with name %s already exists, updating", cg.Name) cgReturned.Spec = cg.Spec - cgr, err := k.crdClient.CrdV1alpha2().ClusterGroups().Update(context.TODO(), cgReturned, metav1.UpdateOptions{}) + cgr, err := data.crdClient.CrdV1alpha2().ClusterGroups().Update(context.TODO(), cgReturned, metav1.UpdateOptions{}) return cgr, err } return nil, fmt.Errorf("error occurred in creating/updating ClusterGroup %s", cg.Name) } // CreateOrUpdateV1Alpha3CG is a convenience function for idempotent setup of crd/v1alpha3 ClusterGroups -func (k *KubernetesUtils) CreateOrUpdateV1Alpha3CG(cg *crdv1alpha3.ClusterGroup) (*crdv1alpha3.ClusterGroup, error) { +func (data *TestData) CreateOrUpdateV1Alpha3CG(cg *crdv1alpha3.ClusterGroup) (*crdv1alpha3.ClusterGroup, error) { log.Infof("Creating/updating ClusterGroup %s", cg.Name) - cgReturned, err := k.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), cg.Name, metav1.GetOptions{}) + cgReturned, err := data.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), cg.Name, metav1.GetOptions{}) if err != nil { - cgr, err := k.crdClient.CrdV1alpha3().ClusterGroups().Create(context.TODO(), cg, metav1.CreateOptions{}) + cgr, err := data.crdClient.CrdV1alpha3().ClusterGroups().Create(context.TODO(), cg, metav1.CreateOptions{}) if err != nil { log.Infof("Unable to create cluster group %s: %v", cg.Name, err) return nil, err @@ -570,24 +557,24 @@ func (k *KubernetesUtils) CreateOrUpdateV1Alpha3CG(cg *crdv1alpha3.ClusterGroup) } else if cgReturned.Name != "" { log.Debugf("ClusterGroup with name %s already exists, updating", cg.Name) cgReturned.Spec = cg.Spec 
- cgr, err := k.crdClient.CrdV1alpha3().ClusterGroups().Update(context.TODO(), cgReturned, metav1.UpdateOptions{}) + cgr, err := data.crdClient.CrdV1alpha3().ClusterGroups().Update(context.TODO(), cgReturned, metav1.UpdateOptions{}) return cgr, err } return nil, fmt.Errorf("error occurred in creating/updating ClusterGroup %s", cg.Name) } -func (k *KubernetesUtils) GetV1Alpha2CG(cgName string) (*crdv1alpha2.ClusterGroup, error) { - return k.crdClient.CrdV1alpha2().ClusterGroups().Get(context.TODO(), cgName, metav1.GetOptions{}) +func (data *TestData) GetV1Alpha2CG(cgName string) (*crdv1alpha2.ClusterGroup, error) { + return data.crdClient.CrdV1alpha2().ClusterGroups().Get(context.TODO(), cgName, metav1.GetOptions{}) } -func (k *KubernetesUtils) GetV1Alpha3CG(cgName string) (*crdv1alpha3.ClusterGroup, error) { - return k.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), cgName, metav1.GetOptions{}) +func (data *TestData) GetV1Alpha3CG(cgName string) (*crdv1alpha3.ClusterGroup, error) { + return data.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), cgName, metav1.GetOptions{}) } // CreateCG is a convenience function for creating an Antrea ClusterGroup by name and selector. 
-func (k *KubernetesUtils) CreateCG(name string, pSelector, nSelector *metav1.LabelSelector, ipBlocks []crdv1alpha1.IPBlock) (*crdv1alpha3.ClusterGroup, error) { +func (data *TestData) CreateCG(name string, pSelector, nSelector *metav1.LabelSelector, ipBlocks []crdv1alpha1.IPBlock) (*crdv1alpha3.ClusterGroup, error) { log.Infof("Creating clustergroup %s", name) - _, err := k.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), name, metav1.GetOptions{}) + _, err := data.crdClient.CrdV1alpha3().ClusterGroups().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { cg := &crdv1alpha3.ClusterGroup{ ObjectMeta: metav1.ObjectMeta{ @@ -603,7 +590,7 @@ func (k *KubernetesUtils) CreateCG(name string, pSelector, nSelector *metav1.Lab if len(ipBlocks) > 0 { cg.Spec.IPBlocks = ipBlocks } - cg, err = k.crdClient.CrdV1alpha3().ClusterGroups().Create(context.TODO(), cg, metav1.CreateOptions{}) + cg, err = data.crdClient.CrdV1alpha3().ClusterGroups().Create(context.TODO(), cg, metav1.CreateOptions{}) if err != nil { log.Debugf("Unable to create clustergroup %s: %s", name, err) } @@ -612,19 +599,10 @@ func (k *KubernetesUtils) CreateCG(name string, pSelector, nSelector *metav1.Lab return nil, fmt.Errorf("clustergroup with name %s already exists", name) } -// GetCG is a convenience function for getting ClusterGroups -func (k *KubernetesUtils) GetCG(name string) (*crdv1alpha2.ClusterGroup, error) { - res, err := k.crdClient.CrdV1alpha2().ClusterGroups().Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return res, nil -} - // DeleteV1Alpha2CG is a convenience function for deleting crd/v1alpha2 ClusterGroup by name. 
-func (k *KubernetesUtils) DeleteV1Alpha2CG(name string) error { +func (data *TestData) DeleteV1Alpha2CG(name string) error { log.Infof("Deleting ClusterGroup %s", name) - err := k.crdClient.CrdV1alpha2().ClusterGroups().Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.crdClient.CrdV1alpha2().ClusterGroups().Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete ClusterGroup %s", name) } @@ -632,9 +610,9 @@ func (k *KubernetesUtils) DeleteV1Alpha2CG(name string) error { } // DeleteV1Alpha3CG is a convenience function for deleting core/v1alpha3 ClusterGroup by name. -func (k *KubernetesUtils) DeleteV1Alpha3CG(name string) error { +func (data *TestData) DeleteV1Alpha3CG(name string) error { log.Infof("deleting ClusterGroup %s", name) - err := k.crdClient.CrdV1alpha3().ClusterGroups().Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.crdClient.CrdV1alpha3().ClusterGroups().Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete ClusterGroup %s", name) } @@ -642,22 +620,22 @@ func (k *KubernetesUtils) DeleteV1Alpha3CG(name string) error { } // CleanCGs is a convenience function for deleting all ClusterGroups in the cluster. 
-func (k *KubernetesUtils) CleanCGs() error { - l, err := k.crdClient.CrdV1alpha2().ClusterGroups().List(context.TODO(), metav1.ListOptions{}) +func (data *TestData) CleanCGs() error { + l, err := data.crdClient.CrdV1alpha2().ClusterGroups().List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list ClusterGroups in v1alpha2") } for _, cg := range l.Items { - if err := k.DeleteV1Alpha2CG(cg.Name); err != nil { + if err := data.DeleteV1Alpha2CG(cg.Name); err != nil { return err } } - l2, err := k.crdClient.CrdV1alpha3().ClusterGroups().List(context.TODO(), metav1.ListOptions{}) + l2, err := data.crdClient.CrdV1alpha3().ClusterGroups().List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list ClusterGroups in v1alpha3") } for _, cg := range l2.Items { - if err := k.DeleteV1Alpha3CG(cg.Name); err != nil { + if err := data.DeleteV1Alpha3CG(cg.Name); err != nil { return err } } @@ -665,12 +643,12 @@ func (k *KubernetesUtils) CleanCGs() error { } // CreateOrUpdateACNP is a convenience function for updating/creating AntreaClusterNetworkPolicies. 
-func (k *KubernetesUtils) CreateOrUpdateACNP(cnp *crdv1alpha1.ClusterNetworkPolicy) (*crdv1alpha1.ClusterNetworkPolicy, error) { +func (data *TestData) CreateOrUpdateACNP(cnp *crdv1alpha1.ClusterNetworkPolicy) (*crdv1alpha1.ClusterNetworkPolicy, error) { log.Infof("Creating/updating ClusterNetworkPolicy %s", cnp.Name) - cnpReturned, err := k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Get(context.TODO(), cnp.Name, metav1.GetOptions{}) + cnpReturned, err := data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Get(context.TODO(), cnp.Name, metav1.GetOptions{}) if err != nil { log.Debugf("Creating ClusterNetworkPolicy %s", cnp.Name) - cnp, err = k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Create(context.TODO(), cnp, metav1.CreateOptions{}) + cnp, err = data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Create(context.TODO(), cnp, metav1.CreateOptions{}) if err != nil { log.Debugf("Unable to create ClusterNetworkPolicy: %s", err) } @@ -678,15 +656,15 @@ func (k *KubernetesUtils) CreateOrUpdateACNP(cnp *crdv1alpha1.ClusterNetworkPoli } else if cnpReturned.Name != "" { log.Debugf("ClusterNetworkPolicy with name %s already exists, updating", cnp.Name) cnpReturned.Spec = cnp.Spec - cnp, err = k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Update(context.TODO(), cnpReturned, metav1.UpdateOptions{}) + cnp, err = data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Update(context.TODO(), cnpReturned, metav1.UpdateOptions{}) return cnp, err } return nil, fmt.Errorf("error occurred in creating/updating ClusterNetworkPolicy %s", cnp.Name) } // GetACNP is a convenience function for getting AntreaClusterNetworkPolicies. 
-func (k *KubernetesUtils) GetACNP(name string) (*crdv1alpha1.ClusterNetworkPolicy, error) { - res, err := k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Get(context.TODO(), name, metav1.GetOptions{}) +func (data *TestData) GetACNP(name string) (*crdv1alpha1.ClusterNetworkPolicy, error) { + res, err := data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Get(context.TODO(), name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -694,9 +672,9 @@ func (k *KubernetesUtils) GetACNP(name string) (*crdv1alpha1.ClusterNetworkPolic } // DeleteACNP is a convenience function for deleting ACNP by name. -func (k *KubernetesUtils) DeleteACNP(name string) error { +func (data *TestData) DeleteACNP(name string) error { log.Infof("Deleting AntreaClusterNetworkPolicies %s", name) - err := k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete ClusterNetworkPolicy %s", name) } @@ -704,13 +682,13 @@ func (k *KubernetesUtils) DeleteACNP(name string) error { } // CleanACNPs is a convenience function for deleting all Antrea ClusterNetworkPolicies in the cluster. -func (k *KubernetesUtils) CleanACNPs() error { - l, err := k.crdClient.CrdV1alpha1().ClusterNetworkPolicies().List(context.TODO(), metav1.ListOptions{}) +func (data *TestData) CleanACNPs() error { + l, err := data.crdClient.CrdV1alpha1().ClusterNetworkPolicies().List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list AntreaClusterNetworkPolicies") } for _, cnp := range l.Items { - if err = k.DeleteACNP(cnp.Name); err != nil { + if err = data.DeleteACNP(cnp.Name); err != nil { return err } } @@ -718,37 +696,33 @@ func (k *KubernetesUtils) CleanACNPs() error { } // CreateOrUpdateANP is a convenience function for updating/creating Antrea NetworkPolicies. 
-func (k *KubernetesUtils) CreateOrUpdateANP(anp *crdv1alpha1.NetworkPolicy) (*crdv1alpha1.NetworkPolicy, error) { +func (data *TestData) CreateOrUpdateANP(anp *crdv1alpha1.NetworkPolicy) (*crdv1alpha1.NetworkPolicy, error) { log.Infof("Creating/updating Antrea NetworkPolicy %s/%s", anp.Namespace, anp.Name) - cnpReturned, err := k.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Get(context.TODO(), anp.Name, metav1.GetOptions{}) + cnpReturned, err := data.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Get(context.TODO(), anp.Name, metav1.GetOptions{}) if err != nil { log.Debugf("Creating Antrea NetworkPolicy %s", anp.Name) - anp, err = k.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Create(context.TODO(), anp, metav1.CreateOptions{}) + anp, err = data.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Create(context.TODO(), anp, metav1.CreateOptions{}) if err != nil { log.Debugf("Unable to create Antrea NetworkPolicy: %s", err) } return anp, err } else if cnpReturned.Name != "" { log.Debugf("Antrea NetworkPolicy with name %s already exists, updating", anp.Name) - anp, err = k.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Update(context.TODO(), anp, metav1.UpdateOptions{}) + anp, err = data.crdClient.CrdV1alpha1().NetworkPolicies(anp.Namespace).Update(context.TODO(), anp, metav1.UpdateOptions{}) return anp, err } return nil, fmt.Errorf("error occurred in creating/updating Antrea NetworkPolicy %s", anp.Name) } // GetANP is a convenience function for getting AntreaNetworkPolicies. 
-func (k *KubernetesUtils) GetANP(namespace, name string) (*crdv1alpha1.NetworkPolicy, error) { - res, err := k.crdClient.CrdV1alpha1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - return res, nil +func (data *TestData) GetANP(namespace, name string) (*crdv1alpha1.NetworkPolicy, error) { + return data.crdClient.CrdV1alpha1().NetworkPolicies(namespace).Get(context.TODO(), name, metav1.GetOptions{}) } // DeleteANP is a convenience function for deleting ANP by name and Namespace. -func (k *KubernetesUtils) DeleteANP(ns, name string) error { +func (data *TestData) DeleteANP(ns, name string) error { log.Infof("Deleting Antrea NetworkPolicy '%s/%s'", ns, name) - err := k.crdClient.CrdV1alpha1().NetworkPolicies(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + err := data.crdClient.CrdV1alpha1().NetworkPolicies(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) if err != nil { return errors.Wrapf(err, "unable to delete Antrea NetworkPolicy %s", name) } @@ -756,14 +730,14 @@ func (k *KubernetesUtils) DeleteANP(ns, name string) error { } // CleanANPs is a convenience function for deleting all Antrea NetworkPolicies in provided namespaces. 
-func (k *KubernetesUtils) CleanANPs(namespaces []string) error { +func (data *TestData) CleanANPs(namespaces []string) error { for _, ns := range namespaces { - l, err := k.crdClient.CrdV1alpha1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) + l, err := data.crdClient.CrdV1alpha1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{}) if err != nil { return errors.Wrapf(err, "unable to list Antrea NetworkPolicies in ns %s", ns) } for _, anp := range l.Items { - if err = k.DeleteANP(anp.Namespace, anp.Name); err != nil { + if err = data.DeleteANP(anp.Namespace, anp.Name); err != nil { return err } } diff --git a/test/e2e/main_test.go b/test/e2e/main_test.go index e9024bc6fc8..15b1bc37e44 100644 --- a/test/e2e/main_test.go +++ b/test/e2e/main_test.go @@ -85,21 +85,24 @@ func testMain(m *testing.M) int { flag.StringVar(&testOptions.skipCases, "skip", "", "Key words to skip cases") flag.Parse() - if err := initProvider(); err != nil { - log.Fatalf("Error when initializing provider: %v", err) - } - cleanupLogging := testOptions.setupLogging() defer cleanupLogging() testData = &TestData{} + if err := testData.InitProvider(testOptions.providerName, testOptions.providerConfigPath); err != nil { + log.Fatalf("Error when initializing provider: %v", err) + } log.Println("Creating K8s clientset") - if err := testData.createClient(); err != nil { + kubeconfigPath, err := testData.provider.GetKubeconfigPath() + if err != nil { + log.Fatalf("error when getting Kubeconfig path: %v", err) + } + if err := testData.CreateClient(kubeconfigPath); err != nil { log.Fatalf("Error when creating K8s clientset: %v", err) return 1 } log.Println("Collecting information about K8s cluster") - if err := collectClusterInfo(); err != nil { + if err := testData.collectClusterInfo(); err != nil { log.Fatalf("Error when collecting information about K8s cluster: %v", err) } if clusterInfo.podV4NetworkCIDR != "" { @@ -115,7 +118,7 @@ func testMain(m *testing.M) int { 
log.Printf("Service IPv6 network: '%s'", clusterInfo.svcV6NetworkCIDR) } log.Printf("Num nodes: %d", clusterInfo.numNodes) - err := ensureAntreaRunning(testData) + err = ensureAntreaRunning(testData) if err != nil { log.Fatalf("Error when deploying Antrea: %v", err) } diff --git a/test/e2e/multicast_test.go b/test/e2e/multicast_test.go index 90c875d1d0a..31a49751bbe 100644 --- a/test/e2e/multicast_test.go +++ b/test/e2e/multicast_test.go @@ -166,7 +166,7 @@ func runTestMulticastForwardToMultipleInterfaces(t *testing.T, data *TestData, s // It sends two multicast packets for every second(-f 500 means it takes 500 milliseconds for sending one packet). sendMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("timeout 90s mcjoin -f 500 -o -p %d -s -t 3 -w 2 -W %d %s", senderPort, mcjoinWaitTimeout, senderGroup)} go func() { - data.runCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) + data.RunCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) }() if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { @@ -175,7 +175,7 @@ func runTestMulticastForwardToMultipleInterfaces(t *testing.T, data *TestData, s // If multicast traffic is sent from non-HostNetwork pods, all multicast interfaces in senders should receive multicast traffic. 
for _, multicastInterface := range senderMulticastInterfaces { tcpdumpReceiveMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("timeout 5s tcpdump -q -i %s -c 1 -W 90 host %s", multicastInterface, senderGroup)} - _, stderr, err := data.runCommandFromPod(testNamespace, tcpdumpName, tcpdumpContainerName, tcpdumpReceiveMulticastCommand) + _, stderr, err := data.RunCommandFromPod(testNamespace, tcpdumpName, tcpdumpContainerName, tcpdumpReceiveMulticastCommand) if err != nil { return false, err } @@ -210,7 +210,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc // The following command joins a multicast group and sets the timeout to 100 seconds(-W 100) before exit. // The command will return after receiving 1 packet(-c 1). receiveMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("mcjoin -c 1 -o -p %d -W %d %s", mc.port, mcjoinWaitTimeout, mc.group.String())} - res, _, err := data.runCommandFromPod(testNamespace, r, mcjoinContainerName, receiveMulticastCommand) + res, _, err := data.RunCommandFromPod(testNamespace, r, mcjoinContainerName, receiveMulticastCommand) failOnError(err, t) assert.Contains(t, res, "Total: 1 packets") }() @@ -219,12 +219,12 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc // It sends two multicast packets for every second(-f 500 means it takes 500 milliseconds for sending one packet). sendMulticastCommand := []string{"/bin/sh", "-c", fmt.Sprintf("mcjoin -f 500 -o -p %d -s -t 3 -w 2 -W %d %s", mc.port, mcjoinWaitTimeout, mc.group.String())} go func() { - data.runCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) + data.RunCommandFromPod(testNamespace, senderName, mcjoinContainerName, sendMulticastCommand) }() if err := wait.Poll(5*time.Second, defaultTimeout, func() (bool, error) { // Sender pods should add an outbound multicast route except running as HostNetwork. 
- _, mrouteResult, _, err := RunCommandOnNode(nodeName(mc.senderConfig.nodeIdx), fmt.Sprintf("ip mroute show to %s iif %s | grep '%s'", mc.group.String(), gatewayInterface, strings.Join(nodeMulticastInterfaces[mc.senderConfig.nodeIdx], " "))) + _, mrouteResult, _, err := data.RunCommandOnNode(nodeName(mc.senderConfig.nodeIdx), fmt.Sprintf("ip mroute show to %s iif %s | grep '%s'", mc.group.String(), gatewayInterface, strings.Join(nodeMulticastInterfaces[mc.senderConfig.nodeIdx], " "))) if err != nil { return false, err } @@ -240,7 +240,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc // Check inbound multicast route and whether multicast interfaces has joined the multicast group. for _, receiver := range mc.receiverConfigs { for _, receiverMulticastInterface := range nodeMulticastInterfaces[receiver.nodeIdx] { - _, mRouteResult, _, err := RunCommandOnNode(nodeName(receiver.nodeIdx), fmt.Sprintf("ip mroute show to %s iif %s ", mc.group.String(), receiverMulticastInterface)) + _, mRouteResult, _, err := data.RunCommandOnNode(nodeName(receiver.nodeIdx), fmt.Sprintf("ip mroute show to %s iif %s ", mc.group.String(), receiverMulticastInterface)) if err != nil { return false, err } @@ -255,7 +255,7 @@ func runTestMulticastBetweenPods(t *testing.T, data *TestData, mc multicastTestc return false, nil } } - _, mAddrResult, _, err := RunCommandOnNode(nodeName(receiver.nodeIdx), fmt.Sprintf("ip maddr show %s | grep %s", receiverMulticastInterface, mc.group.String())) + _, mAddrResult, _, err := data.RunCommandOnNode(nodeName(receiver.nodeIdx), fmt.Sprintf("ip maddr show %s | grep %s", receiverMulticastInterface, mc.group.String())) if err != nil { return false, err } @@ -287,13 +287,13 @@ func computeMulticastInterfaces(t *testing.T, data *TestData) ([][]string, error if err != nil { return nil, err } - transportInterface, err := GetTransportInterface() + transportInterface, err := GetTransportInterface(data) if err != nil { 
t.Fatalf("Error getting transport interfaces: %v", err) } nodeMulticastInterfaces := make([][]string, 0, len(clusterInfo.nodes)) for nodeIdx := range clusterInfo.nodes { - _, localInterfacesStr, _, err := RunCommandOnNode(nodeName(nodeIdx), "ls /sys/class/net") + _, localInterfacesStr, _, err := data.RunCommandOnNode(nodeName(nodeIdx), "ls /sys/class/net") if err != nil { return nil, err } diff --git a/test/e2e/networkpolicy_test.go b/test/e2e/networkpolicy_test.go index c25a19103de..e3e42ad4cde 100644 --- a/test/e2e/networkpolicy_test.go +++ b/test/e2e/networkpolicy_test.go @@ -95,11 +95,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { // So we need to "warm-up" the tunnel. if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } np1, err := data.createNetworkPolicy("test-networkpolicy-ingress", &networkingv1.NetworkPolicySpec{ @@ -155,11 +155,11 @@ func testNetworkPolicyStats(t *testing.T, data *TestData) { go func() { if clusterInfo.podV4NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv4.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } if clusterInfo.podV6NetworkCIDR != "" { cmd := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vz -w 4 %s 80", serverIPs.ipv6.String())} - data.runCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) + 
data.RunCommandFromPod(testNamespace, clientName, busyboxContainerName, cmd) } wg.Done() }() @@ -349,11 +349,11 @@ func testDefaultDenyIngressPolicy(t *testing.T, data *TestData) { _, serverIPs, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "test-server-", serverNode, testNamespace, false) defer cleanupFunc() - service, err := data.createService("nginx", testNamespace, serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, corev1.ServiceTypeNodePort, nil) + service, err := data.CreateService("nginx", testNamespace, serverPort, serverPort, map[string]string{"app": "nginx"}, false, false, corev1.ServiceTypeNodePort, nil) if err != nil { t.Fatalf("Error when creating nginx NodePort service: %v", err) } - defer data.deleteService(service.Name) + defer data.DeleteService(testNamespace, service.Name) // client1 is a host network Pod and is on the same node as the server Pod, simulating kubelet probe traffic. client1Name, _, cleanupFunc := createAndWaitForPod(t, data, data.createBusyboxPodOnNode, "test-hostnetwork-client-can-connect-", serverNode, testNamespace, true) @@ -969,7 +969,7 @@ func createAndWaitForPodWithLabels(t *testing.T, data *TestData, createFunc func t.Fatalf("Error when creating busybox test Pod: %v", err) } cleanupFunc := func() error { - if err := data.deletePod(ns, name); err != nil { + if err := data.DeletePod(ns, name); err != nil { return fmt.Errorf("error when deleting Pod: %v", err) } return nil diff --git a/test/e2e/nodeportlocal_test.go b/test/e2e/nodeportlocal_test.go index 2f9dfbb6218..fede7abbf32 100644 --- a/test/e2e/nodeportlocal_test.go +++ b/test/e2e/nodeportlocal_test.go @@ -103,7 +103,7 @@ func getNPLAnnotation(t *testing.T, data *TestData, r *require.Assertions, testP podTimeout = 18 * time.Second } for i := 0; i <= maxRetries; i++ { - _, err = data.podWaitFor(podTimeout, testPodName, testNamespace, func(pod *corev1.Pod) (bool, error) { + _, err = data.PodWaitFor(podTimeout, testPodName, 
testNamespace, func(pod *corev1.Pod) (bool, error) { var err error if pod.Status.Phase != corev1.PodRunning { return false, nil @@ -177,7 +177,7 @@ func checkForNPLRuleInIPTables(t *testing.T, data *TestData, r *require.Assertio t.Logf("Verifying iptables rules %v, present: %v", rules, present) const timeout = 30 * time.Second err := wait.Poll(time.Second, timeout, func() (bool, error) { - stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) if err != nil { t.Logf("Error while checking rules in iptables: %v", err) // Retry, as sometimes error can occur due to concurrent operations on iptables. @@ -209,7 +209,7 @@ func checkForNPLListeningSockets(t *testing.T, data *TestData, r *require.Assert for _, rule := range rules { protocolOption := "--" + rule.protocol cmd := []string{"ss", "--listening", protocolOption, "-H", "-n"} - stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) if err != nil { return false, fmt.Errorf("error when running 'ss': %v", err) } @@ -241,7 +241,7 @@ func deleteNPLRuleFromIPTables(t *testing.T, data *TestData, r *require.Assertio cmd := append([]string{"iptables", "-w", "10", "-t", "nat", "-D", "ANTREA-NODE-PORT-LOCAL"}, buildRuleForPod(rule)...) 
t.Logf("Deleting iptables rule for %v", rule) const timeout = 30 * time.Second - _, _, err := data.runCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) + _, _, err := data.RunCommandFromPod(antreaNamespace, antreaPod, agentContainerName, cmd) r.NoError(err, "Error when deleting iptables rule") } @@ -302,7 +302,7 @@ func NPLTestMultiplePods(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.deletePod(testNamespace, testPodName) + testData.DeletePod(testNamespace, testPodName) checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } } @@ -319,8 +319,8 @@ func NPLTestPodAddMultiPort(t *testing.T) { selector := make(map[string]string) selector["app"] = "agnhost" ipFamily := corev1.IPv4Protocol - testData.createServiceWithAnnotations("agnhost1", testNamespace, 80, 80, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) - testData.createServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost1", testNamespace, 80, 80, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) expectedAnnotations := newExpectedNPLAnnotations(defaultStartPort, defaultEndPort). 
Add(nil, 80, "tcp").Add(nil, 8080, "tcp") @@ -365,9 +365,9 @@ func NPLTestPodAddMultiPort(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.deletePod(testNamespace, testPodName) - testData.deleteService("agnhost1") - testData.deleteService("agnhost2") + testData.DeletePod(testNamespace, testPodName) + testData.DeleteService(testNamespace, "agnhost1") + testData.DeleteService(testNamespace, "agnhost2") checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } @@ -383,8 +383,8 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { selector := make(map[string]string) selector["app"] = "agnhost" ipFamily := corev1.IPv4Protocol - testData.createServiceWithAnnotations("agnhost1", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) - testData.createServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolUDP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost1", testNamespace, 80, 8080, corev1.ProtocolTCP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) + testData.CreateServiceWithAnnotations("agnhost2", testNamespace, 80, 8080, corev1.ProtocolUDP, selector, false, false, corev1.ServiceTypeClusterIP, &ipFamily, annotation) expectedAnnotations := newExpectedNPLAnnotations(defaultStartPort, defaultEndPort). 
Add(nil, 8080, "tcp").Add(nil, 8080, "udp") @@ -400,7 +400,7 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { pod.Labels[k] = v } } - err := testData.createPodOnNodeInNamespace(testPodName, testNamespace, node, containerName, agnhostImage, cmd, args, []corev1.EnvVar{}, []corev1.ContainerPort{port}, false, mutateLabels) + err := testData.CreatePodOnNodeInNamespace(testPodName, testNamespace, node, containerName, agnhostImage, cmd, args, []corev1.EnvVar{}, []corev1.ContainerPort{port}, false, mutateLabels) r.NoError(err, "Error creating test Pod: %v", err) @@ -421,9 +421,9 @@ func NPLTestPodAddMultiProtocol(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.deletePod(testNamespace, testPodName) - testData.deleteService("agnhost1") - testData.deleteService("agnhost2") + testData.DeletePod(testNamespace, testPodName) + testData.DeleteService(testNamespace, "agnhost1") + testData.DeleteService(testNamespace, "agnhost2") checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } @@ -460,7 +460,7 @@ func NPLTestLocalAccess(t *testing.T) { expectedAnnotations.Check(t, nplAnnotations) checkTrafficForNPL(testData, r, nplAnnotations, clientName) - testData.deletePod(testNamespace, testPodName) + testData.DeletePod(testNamespace, testPodName) checkNPLRulesForPod(t, testData, r, nplAnnotations, antreaPod, testPodIP, false) } diff --git a/test/e2e/performance_test.go b/test/e2e/performance_test.go index f2f1033cc0c..ad2314a6731 100644 --- a/test/e2e/performance_test.go +++ b/test/e2e/performance_test.go @@ -235,7 +235,7 @@ func httpRequest(requests, policyRules int, data *TestData, b *testing.B) { for i := 0; i < b.N; i++ { b.Logf("Running http request bench %d/%d", i+1, b.N) cmd := []string{"ab", "-n", fmt.Sprint(requests), "-c", fmt.Sprint(*httpConcurrency), serverURL.String()} - stdout, stderr, err := data.runCommandFromPod(testNamespace, perftoolPodName, 
perftoolContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(testNamespace, perftoolPodName, perftoolContainerName, cmd) if err != nil { b.Errorf("Error when running http request %dx: %v, stdout: %s, stderr: %s\n", requests, err, stdout, stderr) } @@ -294,7 +294,7 @@ func checkRealize(policyRules int, data *TestData) (bool, error) { } // table 90 is the ingressRuleTable where the rules in workload network policy is being applied to. cmd := []string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=90"} - stdout, _, err := data.runCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) + stdout, _, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, "antrea-agent", cmd) if err != nil { return false, err } diff --git a/test/e2e/proxy_test.go b/test/e2e/proxy_test.go index 4aab5b61368..d073109ee40 100644 --- a/test/e2e/proxy_test.go +++ b/test/e2e/proxy_test.go @@ -97,20 +97,20 @@ func skipIfKubeProxyEnabled(t *testing.T, data *TestData) { } } -func probeFromNode(node string, url string) error { - _, _, _, err := RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) +func probeFromNode(node string, url string, data *TestData) error { + _, _, _, err := data.RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) return err } -func probeHostnameFromNode(node string, baseUrl string) (string, error) { +func probeHostnameFromNode(node string, baseUrl string, data *TestData) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "hostname") - _, hostname, _, err := RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) + _, hostname, _, err := data.RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) return hostname, err } -func probeClientIPFromNode(node string, baseUrl string) (string, error) { +func probeClientIPFromNode(node string, baseUrl 
string, data *TestData) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "clientip") - _, hostPort, _, err := RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) + _, hostPort, _, err := data.RunCommandOnNode(node, fmt.Sprintf("curl --connect-timeout 1 --retry 5 --retry-connrefused %s", url)) if err != nil { return "", err } @@ -119,19 +119,19 @@ func probeClientIPFromNode(node string, baseUrl string) (string, error) { } func probeFromPod(data *TestData, pod string, url string) error { - _, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) + _, _, err := data.RunCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) return err } func probeHostnameFromPod(data *TestData, pod string, baseUrl string) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "hostname") - hostname, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) + hostname, _, err := data.RunCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) return hostname, err } func probeClientIPFromPod(data *TestData, pod string, baseUrl string) (string, error) { url := fmt.Sprintf("%s/%s", baseUrl, "clientip") - hostPort, _, err := data.runCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) + hostPort, _, err := data.RunCommandFromPod(testNamespace, pod, busyboxContainerName, []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"}) if err != nil { return "", err } @@ -213,7 +213,7 @@ func testProxyLoadBalancerService(t *testing.T, isIPv6 bool) { // Delete agnhost Pods which are not on host network and create new agnhost Pods which are on host network. 
hostAgnhosts := []string{"agnhost-host-0", "agnhost-host-1"} for idx, node := range nodes { - require.NoError(t, data.deletePod(testNamespace, agnhosts[idx])) + require.NoError(t, data.DeletePod(testNamespace, agnhosts[idx])) createAgnhostPod(t, data, hostAgnhosts[idx], node, true) } t.Run("HostNetwork Endpoints", func(t *testing.T) { @@ -239,7 +239,7 @@ func loadBalancerTestCases(t *testing.T, data *TestData, clusterUrl, localUrl st func testLoadBalancerClusterFromNode(t *testing.T, data *TestData, nodes []string, url string) { skipIfKubeProxyEnabled(t, data) for _, node := range nodes { - require.NoError(t, probeFromNode(node, url), "Service LoadBalancer whose externalTrafficPolicy is Cluster should be able to be connected from Node") + require.NoError(t, probeFromNode(node, url, data), "Service LoadBalancer whose externalTrafficPolicy is Cluster should be able to be connected from Node") } } @@ -252,7 +252,7 @@ func testLoadBalancerClusterFromPod(t *testing.T, data *TestData, pods []string, func testLoadBalancerLocalFromNode(t *testing.T, data *TestData, nodes []string, url string, expectedHostnames []string) { skipIfKubeProxyEnabled(t, data) for idx, node := range nodes { - hostname, err := probeHostnameFromNode(node, url) + hostname, err := probeHostnameFromNode(node, url, data) require.NoError(t, err, "Service LoadBalancer whose externalTrafficPolicy is Local should be able to be connected from Node") require.Equal(t, hostname, expectedHostnames[idx]) } @@ -346,7 +346,7 @@ func testProxyNodePortService(t *testing.T, isIPv6 bool) { // Delete agnhost Pods which are not on host network and create new agnhost Pods which are on host network. 
hostAgnhosts := []string{"agnhost-host-0", "agnhost-host-1"} for idx, node := range nodes { - require.NoError(t, data.deletePod(testNamespace, agnhosts[idx])) + require.NoError(t, data.DeletePod(testNamespace, agnhosts[idx])) createAgnhostPod(t, data, hostAgnhosts[idx], node, true) } t.Run("HostNetwork Endpoints", func(t *testing.T) { @@ -461,7 +461,7 @@ sleep 3600 } // Connect to NodePort on control plane Node in the fake external network. cmd = fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", testNetns, testNodePortURL) - _, _, err = data.runCommandFromPod(testNamespace, testPod, agnhostContainerName, []string{"sh", "-c", cmd}) + _, _, err = data.RunCommandFromPod(testNamespace, testPod, agnhostContainerName, []string{"sh", "-c", cmd}) require.NoError(t, err, "Service NodePort should be able to be connected from external network when Egress is enabled") } @@ -484,14 +484,14 @@ func createAgnhostPod(t *testing.T, data *TestData, podName string, node string, func testNodePortClusterFromRemote(t *testing.T, data *TestData, nodes, urls []string) { skipIfKubeProxyEnabled(t, data) for idx, node := range nodes { - require.NoError(t, probeFromNode(node, urls[idx]), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from remote Node") + require.NoError(t, probeFromNode(node, urls[idx], data), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from remote Node") } } func testNodePortClusterFromNode(t *testing.T, data *TestData, nodes, urls []string) { skipIfKubeProxyEnabled(t, data) for idx, node := range nodes { - require.NoError(t, probeFromNode(node, urls[idx]), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from Node") + require.NoError(t, probeFromNode(node, urls[idx], data), "Service NodePort whose externalTrafficPolicy is Cluster should be able to be connected from Node") } } @@ -507,11 +507,11 @@ func 
testNodePortLocalFromRemote(t *testing.T, data *TestData, nodes, urls, expe skipIfKubeProxyEnabled(t, data) errMsg := "Service NodePort whose externalTrafficPolicy is Local should be able to be connected from remote Node" for idx, node := range nodes { - hostname, err := probeHostnameFromNode(node, urls[idx]) + hostname, err := probeHostnameFromNode(node, urls[idx], data) require.NoError(t, err, errMsg) require.Equal(t, expectedHostnames[idx], hostname) - clientIP, err := probeClientIPFromNode(node, urls[idx]) + clientIP, err := probeClientIPFromNode(node, urls[idx], data) require.NoError(t, err, errMsg) require.Equal(t, expectedClientIPs[idx], clientIP) } @@ -520,7 +520,7 @@ func testNodePortLocalFromRemote(t *testing.T, data *TestData, nodes, urls, expe func testNodePortLocalFromNode(t *testing.T, data *TestData, nodes, urls, expectedHostnames []string) { skipIfKubeProxyEnabled(t, data) for idx, node := range nodes { - hostname, err := probeHostnameFromNode(node, urls[idx]) + hostname, err := probeHostnameFromNode(node, urls[idx], data) require.NoError(t, err, "Service NodePort whose externalTrafficPolicy is Local should be able to be connected rom Node") require.Equal(t, expectedHostnames[idx], hostname) } @@ -648,10 +648,10 @@ func testProxyServiceSessionAffinity(ipFamily *corev1.IPFamily, ingressIPs []str require.NoError(t, data.createBusyboxPodOnNode(busyboxPod, testNamespace, nodeName, false)) defer data.deletePodAndWait(defaultTimeout, busyboxPod, testNamespace) require.NoError(t, data.podWaitForRunning(defaultTimeout, busyboxPod, testNamespace)) - stdout, stderr, err := data.runCommandFromPod(testNamespace, busyboxPod, busyboxContainerName, []string{"wget", "-O", "-", svc.Spec.ClusterIP, "-T", "1", "-t", "5"}) + stdout, stderr, err := data.RunCommandFromPod(testNamespace, busyboxPod, busyboxContainerName, []string{"wget", "-O", "-", svc.Spec.ClusterIP, "-T", "1", "-t", "5"}) require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", 
*ipFamily, stdout, stderr)) for _, ingressIP := range ingressIPs { - stdout, stderr, err := data.runCommandFromPod(testNamespace, busyboxPod, busyboxContainerName, []string{"wget", "-O", "-", ingressIP, "-T", "1", "-t", "5"}) + stdout, stderr, err := data.RunCommandFromPod(testNamespace, busyboxPod, busyboxContainerName, []string{"wget", "-O", "-", ingressIP, "-T", "1", "-t", "5"}) require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", *ipFamily, stdout, stderr)) } @@ -660,7 +660,7 @@ func testProxyServiceSessionAffinity(ipFamily *corev1.IPFamily, ingressIPs []str agentName, err := data.getAntreaPodOnNode(nodeName) require.NoError(t, err) - table40Output, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=40"}) + table40Output, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, "table=40"}) require.NoError(t, err) if *ipFamily == corev1.IPv4Protocol { require.Contains(t, table40Output, fmt.Sprintf("nw_dst=%s,tp_dst=80", svc.Spec.ClusterIP)) @@ -715,14 +715,14 @@ func testProxyHairpin(ipFamily *corev1.IPFamily, data *TestData, t *testing.T) { defer data.deletePodAndWait(defaultTimeout, busybox, testNamespace) require.NoError(t, err) require.NoError(t, data.podWaitForRunning(defaultTimeout, busybox, testNamespace)) - svc, err := data.createService(busybox, testNamespace, 80, 80, map[string]string{"antrea-e2e": busybox}, false, false, corev1.ServiceTypeClusterIP, ipFamily) + svc, err := data.CreateService(busybox, testNamespace, 80, 80, map[string]string{"antrea-e2e": busybox}, false, false, corev1.ServiceTypeClusterIP, ipFamily) defer data.deleteServiceAndWait(defaultTimeout, busybox) require.NoError(t, err) // Hold on to make sure that the Service is realized. 
time.Sleep(3 * time.Second) - stdout, stderr, err := data.runCommandFromPod(testNamespace, busybox, busyboxContainerName, []string{"nc", svc.Spec.ClusterIP, "80", "-w", "1", "-e", "ls", "/"}) + stdout, stderr, err := data.RunCommandFromPod(testNamespace, busybox, busyboxContainerName, []string{"nc", svc.Spec.ClusterIP, "80", "-w", "1", "-e", "ls", "/"}) require.NoError(t, err, fmt.Sprintf("ipFamily: %v\nstdout: %s\nstderr: %s\n", *ipFamily, stdout, stderr)) } @@ -789,12 +789,12 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te } for tableID, keyword := range keywords { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", tableID)}) + tableOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", tableID)}) require.NoError(t, err) require.Contains(t, tableOutput, keyword) } - groupOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) + groupOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) require.NoError(t, err) for _, k := range groupKeywords { require.Contains(t, groupOutput, k) @@ -806,12 +806,12 @@ func testProxyEndpointLifeCycle(ipFamily *corev1.IPFamily, data *TestData, t *te time.Sleep(time.Second) for tableID, keyword := range keywords { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", tableID)}) + tableOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", 
tableID)}) require.NoError(t, err) require.NotContains(t, tableOutput, keyword) } - groupOutput, _, err = data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) + groupOutput, _, err = data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) require.NoError(t, err) for _, k := range groupKeywords { require.NotContains(t, groupOutput, k) @@ -908,28 +908,28 @@ func testProxyServiceLifeCycle(ipFamily *corev1.IPFamily, ingressIPs []string, d } else { groupKeyword = fmt.Sprintf("load:0x%s->NXM_NX_REG3[],load:0x%x->NXM_NX_REG4[0..15]", strings.TrimLeft(hex.EncodeToString(nginxIPs.ipv4.To4()), "0"), 80) } - groupOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) + groupOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) require.NoError(t, err) require.Contains(t, groupOutput, groupKeyword) for _, expectedTable := range expectedFlows { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) + tableOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) require.NoError(t, err) for _, expectedFlow := range expectedTable.flows { require.Contains(t, tableOutput, expectedFlow) } } - require.NoError(t, data.deleteService(nginx)) - require.NoError(t, data.deleteService(nginxLBService)) + require.NoError(t, data.DeleteService(testNamespace, nginx)) + require.NoError(t, data.DeleteService(testNamespace, nginxLBService)) // Hold on to make sure that the Service 
is realized. time.Sleep(3 * time.Second) - groupOutput, _, err = data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) + groupOutput, _, err = data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-groups", defaultBridgeName}) require.NoError(t, err) require.NotContains(t, groupOutput, groupKeyword) for _, expectedTable := range expectedFlows { - tableOutput, _, err := data.runCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) + tableOutput, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, agentName, "antrea-agent", []string{"ovs-ofctl", "dump-flows", defaultBridgeName, fmt.Sprintf("table=%d", expectedTable.tableID)}) require.NoError(t, err) for _, expectedFlow := range expectedTable.flows { require.NotContains(t, tableOutput, expectedFlow) diff --git a/test/e2e/service_externalip_test.go b/test/e2e/service_externalip_test.go index 7bebb3d9605..db374e95a29 100644 --- a/test/e2e/service_externalip_test.go +++ b/test/e2e/service_externalip_test.go @@ -223,7 +223,7 @@ func testServiceExternalTrafficPolicyLocal(t *testing.T, data *TestData) { annotation := map[string]string{ antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name, } - service, err = data.createServiceWithAnnotations(fmt.Sprintf("test-svc-local-%d", idx), + service, err = data.CreateServiceWithAnnotations(fmt.Sprintf("test-svc-local-%d", idx), testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, true, v1.ServiceTypeLoadBalancer, nil, annotation) require.NoError(t, err) defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}) @@ -342,7 +342,7 @@ func testServiceWithExternalIPCRUD(t *testing.T, data *TestData) { annotation := map[string]string{ 
antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name, } - service, err = data.createServiceWithAnnotations(fmt.Sprintf("test-svc-eip-%d", idx), + service, err = data.CreateServiceWithAnnotations(fmt.Sprintf("test-svc-eip-%d", idx), testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation) require.NoError(t, err) @@ -436,7 +436,7 @@ func testServiceUpdateExternalIP(t *testing.T, data *TestData) { annotation := map[string]string{ antreaagenttypes.ServiceExternalIPPoolAnnotationKey: originalPool.Name, } - service, err := data.createServiceWithAnnotations(fmt.Sprintf("test-update-eip-%d", idx), + service, err := data.CreateServiceWithAnnotations(fmt.Sprintf("test-update-eip-%d", idx), testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation) require.NoError(t, err) defer data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}) @@ -490,7 +490,7 @@ func testServiceNodeFailure(t *testing.T, data *TestData) { } signalAgent := func(nodeName, signal string) { cmd := fmt.Sprintf("pkill -%s antrea-agent", signal) - rc, stdout, stderr, err := RunCommandOnNode(nodeName, cmd) + rc, stdout, stderr, err := data.RunCommandOnNode(nodeName, cmd) if rc != 0 || err != nil { t.Errorf("Error when running command '%s' on Node '%s', rc: %d, stdout: %s, stderr: %s, error: %v", cmd, nodeName, rc, stdout, stderr, err) @@ -518,7 +518,7 @@ func testServiceNodeFailure(t *testing.T, data *TestData) { annotation := map[string]string{ antreaagenttypes.ServiceExternalIPPoolAnnotationKey: externalIPPoolTwoNodes.Name, } - service, err := data.createServiceWithAnnotations("test-service-node-failure", testNamespace, 80, 80, + service, err := data.CreateServiceWithAnnotations("test-service-node-failure", testNamespace, 80, 80, corev1.ProtocolTCP, nil, false, false, v1.ServiceTypeLoadBalancer, nil, annotation) require.NoError(t, err) defer 
data.clientset.CoreV1().Services(service.Namespace).Delete(context.TODO(), service.Name, metav1.DeleteOptions{}) @@ -628,9 +628,9 @@ func testExternalIPAccess(t *testing.T, data *TestData) { annotations := map[string]string{ antreaagenttypes.ServiceExternalIPPoolAnnotationKey: ipPool.Name, } - service, err := data.createServiceWithAnnotations(et.serviceName, testNamespace, port, port, corev1.ProtocolTCP, map[string]string{"app": "agnhost"}, false, et.externalTrafficPolicyLocal, corev1.ServiceTypeLoadBalancer, &ipFamily, annotations) + service, err := data.CreateServiceWithAnnotations(et.serviceName, testNamespace, port, port, corev1.ProtocolTCP, map[string]string{"app": "agnhost"}, false, et.externalTrafficPolicyLocal, corev1.ServiceTypeLoadBalancer, &ipFamily, annotations) require.NoError(t, err) - defer data.deleteService(service.Name) + defer data.DeleteService(testNamespace, service.Name) externalIP, host, err := waitExternalIPConfigured(service) require.NoError(t, err) @@ -668,7 +668,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen) } })) - _, err = data.podWaitFor(defaultTimeout, tt.clientName, testNamespace, func(p *v1.Pod) (bool, error) { + _, err = data.PodWaitFor(defaultTimeout, tt.clientName, testNamespace, func(p *v1.Pod) (bool, error) { for _, condition := range p.Status.Conditions { if condition.Type == corev1.PodReady { return condition.Status == corev1.ConditionTrue, nil @@ -681,7 +681,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen) hostNameUrl := fmt.Sprintf("%s/%s", baseUrl, "hostname") probeCmd := fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", tt.clientName, hostNameUrl) - hostname, stderr, err := data.runCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeCmd}) + hostname, stderr, err := data.RunCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeCmd}) assert.NoError(t, err, "External IP should be 
able to be connected from remote: %s", stderr) if et.externalTrafficPolicyLocal { @@ -692,7 +692,7 @@ sleep 3600`, tt.clientName, tt.clientIP, tt.localIP, tt.clientIPMaskLen) } clientIPUrl := fmt.Sprintf("%s/clientip", baseUrl) probeClientIPCmd := fmt.Sprintf("ip netns exec %s curl --connect-timeout 1 --retry 5 --retry-connrefused %s", tt.clientName, clientIPUrl) - clientIPPort, stderr, err := data.runCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeClientIPCmd}) + clientIPPort, stderr, err := data.RunCommandFromPod(testNamespace, tt.clientName, "", []string{"sh", "-c", probeClientIPCmd}) assert.NoError(t, err, "External IP should be able to be connected from remote: %s", stderr) clientIP, _, err := net.SplitHostPort(clientIPPort) assert.NoError(t, err) diff --git a/test/e2e/service_test.go b/test/e2e/service_test.go index 84fc35f0042..57b4493ff01 100644 --- a/test/e2e/service_test.go +++ b/test/e2e/service_test.go @@ -76,7 +76,7 @@ func (data *TestData) testClusterIP(t *testing.T, isIPv6 bool, namespace string) testClusterIPCases(t, data, url, clients, hostNetworkClients, namespace) }) - require.NoError(t, data.deletePod(namespace, nginx)) + require.NoError(t, data.DeletePod(namespace, nginx)) createAndWaitForPod(t, data, data.createNginxPodOnNode, hostNginx, nodeName(0), namespace, true) t.Run("HostNetwork Endpoints", func(t *testing.T) { testClusterIPCases(t, data, url, clients, hostNetworkClients, namespace) @@ -102,7 +102,7 @@ func testClusterIPFromPod(t *testing.T, data *TestData, url, nodeName, podName s cmd := []string{"/agnhost", "connect", url, "--timeout=1s", "--protocol=tcp"} err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) { t.Logf(strings.Join(cmd, " ")) - stdout, stderr, err := data.runCommandFromPod(namespace, podName, agnhostContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(namespace, podName, agnhostContainerName, cmd) t.Logf("stdout: %s - stderr: %s - err: %v", stdout, 
stderr, err) if err == nil { return true, nil @@ -159,7 +159,7 @@ func (data *TestData) testNodePort(t *testing.T, isWindows bool, namespace strin url := fmt.Sprintf("http://%s:%d", nodeIP, nodePort) cmd := []string{"wget", "-O", "-", url, "-T", "1", "-t", "5"} - stdout, stderr, err := data.runCommandFromPod(namespace, clientName, busyboxContainerName, cmd) + stdout, stderr, err := data.RunCommandFromPod(namespace, clientName, busyboxContainerName, cmd) if err != nil { t.Errorf("Error when running command '%s' from Pod '%s', stdout: %s, stderr: %s, error: %v", strings.Join(cmd, " "), clientName, stdout, stderr, err) @@ -182,7 +182,7 @@ func (data *TestData) createAgnhostServiceAndBackendPods(t *testing.T, name, nam require.NoError(t, err) t.Logf("Created service Pod IPs %v", podIPs.ipStrings) require.NoError(t, data.podWaitForRunning(defaultTimeout, name, namespace)) - svc, err := data.createService(name, namespace, 80, 80, map[string]string{"app": "agnhost"}, false, false, svcType, &ipv4Protocol) + svc, err := data.CreateService(name, namespace, 80, 80, map[string]string{"app": "agnhost"}, false, false, svcType, &ipv4Protocol) require.NoError(t, err) cleanup := func() { diff --git a/test/e2e/supportbundle_test.go b/test/e2e/supportbundle_test.go index debdf3a14ec..87ad204cd28 100644 --- a/test/e2e/supportbundle_test.go +++ b/test/e2e/supportbundle_test.go @@ -38,7 +38,7 @@ import ( // getAccessToken retrieves the local access token of an antrea component API server. 
func getAccessToken(podName string, containerName string, tokenPath string, data *TestData) (string, error) { - stdout, _, err := data.runCommandFromPod(metav1.NamespaceSystem, podName, containerName, []string{"cat", tokenPath}) + stdout, _, err := data.RunCommandFromPod(metav1.NamespaceSystem, podName, containerName, []string{"cat", tokenPath}) if err != nil { return "", err } diff --git a/test/e2e/tls_test.go b/test/e2e/tls_test.go index 602c86a6fe0..9a2dab35626 100644 --- a/test/e2e/tls_test.go +++ b/test/e2e/tls_test.go @@ -148,7 +148,7 @@ func (data *TestData) opensslConnect(t *testing.T, pod string, container string, if tls12 { cmd = append(cmd, "-tls1_2") } - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, pod, container, cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, pod, container, cmd) assert.NoError(t, err, "failed to run openssl command on Pod '%s'\nstderr: %s", pod, stderr) t.Logf("Ran '%s' on Pod %s", strings.Join(cmd, " "), pod) stdouts = append(stdouts, stdout) diff --git a/test/e2e/upgrade_test.go b/test/e2e/upgrade_test.go index 95edfca3b9c..3cb65b45ba4 100644 --- a/test/e2e/upgrade_test.go +++ b/test/e2e/upgrade_test.go @@ -64,10 +64,10 @@ func TestUpgrade(t *testing.T) { namespace := randName("test-namespace-") t.Logf("Creating namespace '%s'", namespace) - if err := data.createNamespace(namespace, nil); err != nil { + if err := data.CreateNamespace(namespace, nil); err != nil { t.Fatalf("Error when creating namespace '%s'", namespace) } - defer data.deleteNamespace(namespace, defaultTimeout) + defer data.DeleteNamespace(namespace, defaultTimeout) data.testPodConnectivitySameNode(t) data.testPodConnectivityDifferentNodes(t) @@ -115,7 +115,7 @@ func TestUpgrade(t *testing.T) { checkFn() t.Logf("Deleting namespace '%s'", namespace) - if err := data.deleteNamespace(namespace, defaultTimeout); err != nil { + if err := data.DeleteNamespace(namespace, defaultTimeout); err != nil { t.Errorf("Namespace deletion 
failed: %v", err) } diff --git a/test/e2e/wireguard_test.go b/test/e2e/wireguard_test.go index f65d6fb8c7b..f06b4f3c2ff 100644 --- a/test/e2e/wireguard_test.go +++ b/test/e2e/wireguard_test.go @@ -37,14 +37,15 @@ func TestWireGuard(t *testing.T) { skipIfNumNodesLessThan(t, 2) skipIfHasWindowsNodes(t) skipIfAntreaIPAMTest(t) + + data, err := setupTest(t) + skipIfEncapModeIsNot(t, data, config.TrafficEncapModeEncap) providerIsKind := testOptions.providerName == "kind" if !providerIsKind { for _, node := range clusterInfo.nodes { - skipIfMissingKernelModule(t, node.name, []string{"wireguard"}) + skipIfMissingKernelModule(t, data, node.name, []string{"wireguard"}) } } - data, err := setupTest(t) - skipIfEncapModeIsNot(t, data, config.TrafficEncapModeEncap) if err != nil { t.Fatalf("Error when setting up test: %v", err) @@ -82,7 +83,7 @@ func (data *TestData) getWireGuardPeerEndpointsWithHandshake(nodeName string) ([ return peerEndpoints, err } cmd := []string{"wg"} - stdout, stderr, err := data.runCommandFromPod(antreaNamespace, antreaPodName, "wireguard", cmd) + stdout, stderr, err := data.RunCommandFromPod(antreaNamespace, antreaPodName, "wireguard", cmd) if err != nil { return peerEndpoints, fmt.Errorf("error when running 'wg' on '%s': %v - stdout: %s - stderr: %s", nodeName, err, stdout, stderr) }