From f4c55622bff770315d0c7f29a34d8ae1504af4b3 Mon Sep 17 00:00:00 2001
From: Knative Automation
Date: Fri, 19 Apr 2024 07:27:27 -0400
Subject: [PATCH] upgrade to latest dependencies (#1311)

bumping knative.dev/networking a40d058...979f637:
> 979f637 [release-1.14] Update test dialer to use ip when available (# 971)

Signed-off-by: Knative Automation
---
 go.mod                               |   2 +-
 go.sum                               |   4 +-
 .../test/conformance/ingress/util.go | 101 ++++++++++--------
 vendor/modules.txt                   |   2 +-
 4 files changed, 61 insertions(+), 48 deletions(-)

diff --git a/go.mod b/go.mod
index 0db0024dfc..cd38ac5243 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,7 @@ require (
 	k8s.io/apimachinery v0.29.2
 	k8s.io/client-go v0.29.2
 	knative.dev/hack v0.0.0-20240404013450-1133b37da8d7
-	knative.dev/networking v0.0.0-20240416165409-a40d058f6aef
+	knative.dev/networking v0.0.0-20240418213116-979f63728302
 	knative.dev/pkg v0.0.0-20240416145024-0f34a8815650
 	sigs.k8s.io/yaml v1.4.0
 )
diff --git a/go.sum b/go.sum
index 63a66feba6..78aea49290 100644
--- a/go.sum
+++ b/go.sum
@@ -685,8 +685,8 @@ k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCf
 k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 knative.dev/hack v0.0.0-20240404013450-1133b37da8d7 h1:fkWYWvdHm1mVHevKW2vVJnZtxH0NzOlux8imesweKwE=
 knative.dev/hack v0.0.0-20240404013450-1133b37da8d7/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20240416165409-a40d058f6aef h1:bA7FC8IGcXFO95LqbUqVMngp0bEXgQ/D2vXDJj8QbfI=
-knative.dev/networking v0.0.0-20240416165409-a40d058f6aef/go.mod h1:bb3B1eNMNO827kErNDGKYYiBgtlaR6T1DEDnfEad3q4=
+knative.dev/networking v0.0.0-20240418213116-979f63728302 h1:TQwoh8K7RgPXU+5Z5SX385vYjgvQ0OzdFLHRRCeF5K4=
+knative.dev/networking v0.0.0-20240418213116-979f63728302/go.mod h1:bb3B1eNMNO827kErNDGKYYiBgtlaR6T1DEDnfEad3q4=
 knative.dev/pkg v0.0.0-20240416145024-0f34a8815650 h1:m2ahFUO0L2VrgGDYdyOUFdE6xBd3pLXAJozLJwqLRQM=
 knative.dev/pkg v0.0.0-20240416145024-0f34a8815650/go.mod h1:soFw5ss08G4PU3JiFDKqiZRd2U7xoqcfNpJP1coIXkY=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/knative.dev/networking/test/conformance/ingress/util.go b/vendor/knative.dev/networking/test/conformance/ingress/util.go
index 687cfc817d..3ebbb98424 100644
--- a/vendor/knative.dev/networking/test/conformance/ingress/util.go
+++ b/vendor/knative.dev/networking/test/conformance/ingress/util.go
@@ -1046,61 +1046,74 @@ func CreateDialContext(ctx context.Context, t *testing.T, ing *v1alpha1.Ingress,
 	// TODO(mattmoor): I'm open to tricks that would let us cleanly test multiple
 	// public load balancers or LBs with multiple ingresses (below), but want to
 	// keep our simple tests simple, thus the [0]s...
-	// We expect an ingress LB with the form foo.bar.svc.cluster.local (though
-	// we aren't strictly sensitive to the suffix, this is just illustrative.
 	internalDomain := ing.Status.PublicLoadBalancer.Ingress[0].DomainInternal
-	parts := strings.SplitN(internalDomain, ".", 3)
-	if len(parts) < 3 {
-		t.Fatal("Too few parts in internal domain:", internalDomain)
-	}
-	name, namespace := parts[0], parts[1]
+	if internalDomain != "" {
+		parts := strings.SplitN(internalDomain, ".", 3)
+		if len(parts) < 3 {
+			t.Fatal("Too few parts in internal domain:", internalDomain)
+		}
+		name, namespace := parts[0], parts[1]

-	var svc *corev1.Service
-	err := reconciler.RetryTestErrors(func(attempts int) (err error) {
-		svc, err = clients.KubeClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
-		return err
-	})
-	if err != nil {
-		t.Fatalf("Unable to retrieve Kubernetes service %s/%s: %v", namespace, name, err)
-	}
+		var svc *corev1.Service
+		err := reconciler.RetryTestErrors(func(attempts int) (err error) {
+			svc, err = clients.KubeClient.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
+			return err
+		})
+		if err != nil {
+			t.Fatalf("Unable to retrieve Kubernetes service %s/%s: %v", namespace, name, err)
+		}

-	dial := network.NewBackoffDialer(dialBackoff)
-	if pkgTest.Flags.IngressEndpoint != "" {
-		t.Logf("ingressendpoint: %q", pkgTest.Flags.IngressEndpoint)
+		dial := network.NewBackoffDialer(dialBackoff)
+		if pkgTest.Flags.IngressEndpoint != "" {
+			t.Logf("ingressendpoint: %q", pkgTest.Flags.IngressEndpoint)

-		// If we're using a manual --ingressendpoint then don't require
-		// "type: LoadBalancer", which may not play nice with KinD
-		return func(ctx context.Context, _ string, address string) (net.Conn, error) {
-			_, port, err := net.SplitHostPort(address)
-			if err != nil {
-				return nil, err
+			// If we're using a manual --ingressendpoint then don't require
+			// "type: LoadBalancer", which may not play nice with KinD
+			return func(ctx context.Context, _ string, address string) (net.Conn, error) {
+				_, port, err := net.SplitHostPort(address)
+				if err != nil {
+					return nil, err
+				}
+				for _, sp := range svc.Spec.Ports {
+					if fmt.Sprint(sp.Port) == port {
+						return dial(ctx, "tcp", fmt.Sprintf("%s:%d", pkgTest.Flags.IngressEndpoint, sp.NodePort))
+					}
+				}
+				return nil, fmt.Errorf("service doesn't contain a matching port: %s", port)
 			}
-			for _, sp := range svc.Spec.Ports {
-				if fmt.Sprint(sp.Port) == port {
-					return dial(ctx, "tcp", fmt.Sprintf("%s:%d", pkgTest.Flags.IngressEndpoint, sp.NodePort))
+		} else if len(svc.Status.LoadBalancer.Ingress) >= 1 {
+			ingress := svc.Status.LoadBalancer.Ingress[0]
+			return func(ctx context.Context, _ string, address string) (net.Conn, error) {
+				_, port, err := net.SplitHostPort(address)
+				if err != nil {
+					return nil, err
+				}
+				if ingress.IP != "" {
+					return dial(ctx, "tcp", ingress.IP+":"+port)
 				}
+				if ingress.Hostname != "" {
+					return dial(ctx, "tcp", ingress.Hostname+":"+port)
+				}
+				return nil, errors.New("service ingress does not contain dialing information")
 			}
-			return nil, fmt.Errorf("service doesn't contain a matching port: %s", port)
 		}
-	} else if len(svc.Status.LoadBalancer.Ingress) >= 1 {
-		ingress := svc.Status.LoadBalancer.Ingress[0]
-		return func(ctx context.Context, _ string, address string) (net.Conn, error) {
-			_, port, err := net.SplitHostPort(address)
-			if err != nil {
-				return nil, err
-			}
-			if ingress.IP != "" {
-				return dial(ctx, "tcp", ingress.IP+":"+port)
-			}
-			if ingress.Hostname != "" {
-				return dial(ctx, "tcp", ingress.Hostname+":"+port)
-			}
-			return nil, errors.New("service ingress does not contain dialing information")
+		t.Fatal("Service does not have a supported shape (not type LoadBalancer? missing --ingressendpoint?).")
+	} else if ing.Status.PublicLoadBalancer.Ingress[0].IP != "" {
+		dial := network.NewBackoffDialer(dialBackoff)
+		ingressIP := ing.Status.PublicLoadBalancer.Ingress[0].IP
+
+		port := 80
+		if ing.Spec.Rules[0].Visibility == v1alpha1.IngressVisibilityExternalIP && ing.Spec.HTTPOption == v1alpha1.HTTPOptionRedirected {
+			port = 443
+		}
+
+		return func(ctx context.Context, _ string, _ string) (net.Conn, error) {
+			return dial(ctx, "tcp", fmt.Sprintf("%s:%d", ingressIP, port))
+		}
 	} else {
-		t.Fatal("Service does not have a supported shape (not type LoadBalancer? missing --ingressendpoint?).")
-		return nil // Unreachable
+		t.Fatal("No IP or domain found on ingress.")
 	}
+	return nil // Unreachable
 }

 type RequestOption func(*http.Request)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 7db48f343c..a6deffac93 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -910,7 +910,7 @@ k8s.io/utils/trace
 # knative.dev/hack v0.0.0-20240404013450-1133b37da8d7
 ## explicit; go 1.18
 knative.dev/hack
-# knative.dev/networking v0.0.0-20240416165409-a40d058f6aef
+# knative.dev/networking v0.0.0-20240418213116-979f63728302
 ## explicit; go 1.21
 knative.dev/networking/config
 knative.dev/networking/pkg
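
For reviewers, the behavior picked up from knative.dev/networking 979f637 changes how CreateDialContext picks a dial target: it still prefers the load balancer's DomainInternal (resolving through the Kubernetes Service as before), but when the status only reports an IP it now dials that IP directly, on port 443 if the Ingress is external and HTTP is redirected, otherwise on port 80, and fails only when neither is set. The sketch below is a minimal illustration of that selection logic under simplified assumptions; pickDialTarget and its plain string/bool parameters are illustrative stand-ins for the knative Ingress types, not code from the patch.

package main

import (
	"errors"
	"fmt"
)

// pickDialTarget mirrors the dial-target selection added to CreateDialContext:
// prefer the internal domain (the real code then resolves the Kubernetes
// Service behind it), fall back to dialing the load balancer IP directly,
// and fail when neither is available. Simplified, illustrative only.
func pickDialTarget(internalDomain, lbIP string, externalIP, httpsRedirected bool) (string, error) {
	if internalDomain != "" {
		// Real code: look up the Service named by this domain and dial its
		// NodePort or LoadBalancer address; represented here by the domain.
		return internalDomain, nil
	}
	if lbIP != "" {
		port := 80
		if externalIP && httpsRedirected {
			// With HTTP redirected on an external Ingress, plain HTTP only
			// answers with a redirect, so dial 443 instead.
			port = 443
		}
		return fmt.Sprintf("%s:%d", lbIP, port), nil
	}
	return "", errors.New("no IP or domain found on ingress")
}

func main() {
	target, err := pickDialTarget("", "203.0.113.10", true, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(target) // 203.0.113.10:443
}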