From fe8a8f560ab5c652833afd0ef63a0c71e1eb0477 Mon Sep 17 00:00:00 2001 From: wgrayson Date: Wed, 6 Apr 2022 16:16:59 -0700 Subject: [PATCH] ICMP support e2e test Add E2E tests and related content in doc for ICMP support PR #3472 Signed-off-by: wgrayson --- docs/antrea-network-policy.md | 36 +- multicluster/test/e2e/antreapolicy_test.go | 4 +- test/e2e/antreapolicy_test.go | 395 ++++++++++++--------- test/e2e/flowaggregator_test.go | 12 +- test/e2e/framework.go | 4 +- test/e2e/k8s_util.go | 106 +++++- test/e2e/utils/anpspecbuilder.go | 44 +-- test/e2e/utils/cnpspecbuilder.go | 72 +--- test/e2e/utils/helper.go | 86 +++++ 9 files changed, 476 insertions(+), 283 deletions(-) create mode 100644 test/e2e/utils/helper.go diff --git a/docs/antrea-network-policy.md b/docs/antrea-network-policy.md index 1e3304d7950..5032ed1cb1d 100644 --- a/docs/antrea-network-policy.md +++ b/docs/antrea-network-policy.md @@ -16,6 +16,7 @@ - [ACNP for strict Namespace isolation](#acnp-for-strict-namespace-isolation) - [ACNP for default zero-trust cluster security posture](#acnp-for-default-zero-trust-cluster-security-posture) - [ACNP for toServices rule](#acnp-for-toservices-rule) + - [ACNP for ICMP traffic](#acnp-for-icmp-traffic) - [Behavior of to and from selectors](#behavior-of-to-and-from-selectors) - [Key differences from K8s NetworkPolicy](#key-differences-from-k8s-networkpolicy) - [kubectl commands for Antrea ClusterNetworkPolicy](#kubectl-commands-for-antrea-clusternetworkpolicy) @@ -381,6 +382,33 @@ spec: enableLogging: true ``` +#### ACNP for ICMP traffic + +```yaml +apiVersion: crd.antrea.io/v1alpha1 +kind: ClusterNetworkPolicy +metadata: + name: acnp-reject-ping-request +spec: + priority: 5 + tier: securityops + appliedTo: + - podSelector: + matchLabels: + role: server + namespaceSelector: + matchLabels: + env: prod + egress: + - action: Reject + protocols: + - icmp: + icmpType: 8 + icmpCode: 0 + name: DropPingRequest + enableLogging: true +``` + **spec**: The 
ClusterNetworkPolicy `spec` has all the information needed to define a cluster-wide security policy. @@ -423,7 +451,7 @@ default tier i.e. the "application" Tier. **action**: Each ingress or egress rule of a ClusterNetworkPolicy must have the `action` field set. As of now, the available actions are ["Allow", "Drop", "Reject", "Pass"]. When the rule action is "Allow" or "Drop", Antrea will allow or drop traffic which -matches both `from/to` and `ports` sections of that rule, given that traffic does not +matches all of the `from/to`, `ports` and `protocols` sections of that rule, given that traffic does not match a higher precedence rule in the cluster (ACNP rules created in higher order Tiers or policy instances in the same Tier with lower priority number). If a "Reject" rule is matched, the client initiating the traffic will receive `ICMP host administratively @@ -439,6 +467,9 @@ configurations will be rejected by the admission controller. **ingress**: Each ClusterNetworkPolicy may consist of zero or more ordered set of ingress rules. Under `ports`, the optional field `endPort` can only be set when a numerical `port` is set to represent a range of ports from `port` to `endPort` inclusive. +`protocols` defines additional protocols that are not supported by `ports`. Currently, only +the ICMP protocol is supported under `protocols`. `icmpType` and `icmpCode` can be used to specify the ICMP +traffic that this rule matches. Also, each rule has an optional `name` field, which should be unique within the policy describing the intention of this rule. If `name` is not provided for a rule, it will be auto-generated by Antrea. The auto-generated name will be @@ -470,6 +501,9 @@ of egress rules. Each rule, depending on the `action` field of the rule, allows or drops traffic which matches all `from`, `ports` sections. Under `ports`, the optional field `endPort` can only be set when a numerical `port` is set to represent a range of ports from `port` to `endPort` inclusive. 
+`protocols` defines additional protocols that are not supported by `ports`. Currently, only +the ICMP protocol is supported under `protocols`. `icmpType` and `icmpCode` can be used to specify the ICMP +traffic that this rule matches. Also, each rule has an optional `name` field, which should be unique within the policy describing the intention of this rule. If `name` is not provided for a rule, it will be auto-generated by Antrea. The rule name auto-generation process diff --git a/multicluster/test/e2e/antreapolicy_test.go b/multicluster/test/e2e/antreapolicy_test.go index 70d56ea5565..9d68ee4eb05 100644 --- a/multicluster/test/e2e/antreapolicy_test.go +++ b/multicluster/test/e2e/antreapolicy_test.go @@ -20,9 +20,9 @@ import ( "time" log "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" antreae2e "antrea.io/antrea/test/e2e" + "antrea.io/antrea/test/e2e/utils" ) const ( @@ -102,7 +102,7 @@ func (data *MCTestData) testAntreaPolicyCopySpanNSIsolation(t *testing.T) { Name: "Port 80", Reachability: reachability, Ports: []int32{80}, - Protocol: v1.ProtocolTCP, + Protocol: utils.ProtocolTCP, } testCaseList := []*antreae2e.TestCase{ { diff --git a/test/e2e/antreapolicy_test.go b/test/e2e/antreapolicy_test.go index a4c1587c6b0..a2767bbcdba 100644 --- a/test/e2e/antreapolicy_test.go +++ b/test/e2e/antreapolicy_test.go @@ -163,7 +163,7 @@ func applyDefaultDenyToAllNamespaces(k8s *KubernetesUtils, namespaces []string) } time.Sleep(networkPolicyDelay) r := NewReachability(allPods, Dropped) - k8s.Validate(allPods, r, []int32{p80}, v1.ProtocolTCP) + k8s.Validate(allPods, r, []int32{p80}, ProtocolTCP) _, wrong, _ := r.Summary() if wrong != 0 { return fmt.Errorf("error when creating default deny k8s NetworkPolicies") } @@ -177,7 +177,7 @@ func cleanupDefaultDenyNPs(k8s *KubernetesUtils, namespaces []string) error { } time.Sleep(networkPolicyDelay * 2) r := NewReachability(allPods, Connected) - k8s.Validate(allPods, r, []int32{p80}, 
ProtocolTCP) _, wrong, _ := r.Summary() if wrong != 0 { return fmt.Errorf("error when cleaning default deny k8s NetworkPolicies") @@ -227,7 +227,7 @@ func testMutateACNPNoRuleName(t *testing.T) { builder = builder.SetName("acnp-no-rule-name"). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0). - AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) acnp := builder.Get() log.Debugf("creating ACNP %v", acnp.Name) @@ -252,7 +252,7 @@ func testMutateANPNoRuleName(t *testing.T) { builder = builder.SetName("x", "anp-no-rule-name"). SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). SetPriority(10.0). - AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionAllow, "") anp := builder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -302,9 +302,9 @@ func testInvalidANPRuleNameNotUnique(t *testing.T) { builder := &AntreaNetworkPolicySpecBuilder{} builder = builder.SetName("x", "anp-rule-name-not-unique"). SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}). - AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionAllow, "not-unique"). 
- AddIngress(v1.ProtocolTCP, &p81, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, + AddIngress(ProtocolTCP, &p81, nil, nil, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionAllow, "not-unique") anp := builder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -334,7 +334,7 @@ func testInvalidANPPortRangePortUnset(t *testing.T) { builder = builder.SetName("y", "anp-egress-port-range-port-unset"). SetPriority(1.0). SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}) - builder.AddEgress(v1.ProtocolTCP, nil, nil, &p8085, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, + builder.AddEgress(ProtocolTCP, nil, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range") anp := builder.Get() @@ -351,7 +351,7 @@ func testInvalidANPPortRangeEndPortSmall(t *testing.T) { builder = builder.SetName("y", "anp-egress-port-range-endport-small"). SetPriority(1.0). SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}) - builder.AddEgress(v1.ProtocolTCP, &p8082, nil, &p8081, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, + builder.AddEgress(ProtocolTCP, &p8082, nil, &p8081, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range") anp := builder.Get() @@ -497,7 +497,7 @@ func testACNPAllowXBtoA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-a"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) @@ -512,7 +512,7 @@ func testACNPAllowXBtoA(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -531,7 +531,7 @@ func testACNPAllowXBtoYA(t *testing.T) { builder = builder.SetName("acnp-allow-xb-to-ya"). SetPriority(2.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "y"}}}) - builder.AddIngress(v1.ProtocolTCP, nil, &port81Name, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Dropped) @@ -544,7 +544,7 @@ func testACNPAllowXBtoYA(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{81}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -563,14 +563,14 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { builder1 = builder1.SetName("acnp-priority2"). SetPriority(2). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder1.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-priority1"). SetPriority(1). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) // Ingress from ns:z to x/a will be dropped since acnp-priority1 has higher precedence. @@ -589,7 +589,7 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { reachabilityBothACNP, []metav1.Object{builder1.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -601,8 +601,8 @@ func testACNPPriorityOverrideDefaultDeny(t *testing.T) { } // testACNPAllowNoDefaultIsolation tests that no default isolation rules are created for Policies. -func testACNPAllowNoDefaultIsolation(t *testing.T, protocol v1.Protocol) { - if protocol == v1.ProtocolSCTP { +func testACNPAllowNoDefaultIsolation(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on // different Node VMs which are themselves on different ESX hosts. We are @@ -614,9 +614,9 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol v1.Protocol) { builder = builder.SetName("acnp-allow-x-ingress-y-egress-z"). SetPriority(1.1). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder.AddIngress(protocol, &p81, nil, nil, nil, nil, map[string]string{"ns": "y"}, + builder.AddIngress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) - builder.AddEgress(protocol, &p81, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(protocol, &p81, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -638,8 +638,8 @@ func testACNPAllowNoDefaultIsolation(t *testing.T, protocol v1.Protocol) { } // testACNPDropEgress tests that an ACNP is able to drop egress traffic from pods labelled A to namespace Z. -func testACNPDropEgress(t *testing.T, protocol v1.Protocol) { - if protocol == v1.ProtocolSCTP { +func testACNPDropEgress(t *testing.T, protocol AntreaPolicyProtocol) { + if protocol == ProtocolSCTP { // SCTP testing is failing on our IPv6 CI testbeds at the moment. This seems to be // related to an issue with ESX networking for SCTPv6 traffic when the Pods are on // different Node VMs which are themselves on different ESX hosts. We are @@ -651,7 +651,7 @@ func testACNPDropEgress(t *testing.T, protocol v1.Protocol) { builder = builder.SetName("acnp-deny-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(protocol, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -684,7 +684,7 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { builder = builder.SetName("acnp-deny-ingress-to-x"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, false, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "drop-all-ingress", nil) reachability := NewReachability(allPods, Connected) @@ -698,7 +698,7 @@ func testACNPDropIngressInSelectedNamespace(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -715,7 +715,7 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-ingress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachability1 := NewReachability(allPods, Connected) @@ -736,7 +736,7 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { reachability1, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -745,7 +745,7 @@ func testACNPNoEffectOnOtherProtocols(t *testing.T) { reachability2, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolUDP, + ProtocolUDP, 0, nil, }, @@ -768,7 +768,7 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-ya-from-xb"). SetPriority(2.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddIngress(v1.ProtocolTCP, nil, &port81Name, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -782,7 +782,7 @@ func testACNPAppliedToDenyXBtoCGWithYA(t *testing.T) { // Note in this testcase the ClusterGroup is created after the ACNP []metav1.Object{builder.Get(), cgBuilder.Get()}, []int32{81}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -805,7 +805,7 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-xb-to-ya"). SetPriority(2.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "y"}}}) - builder.AddIngress(v1.ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, &port81Name, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) @@ -818,7 +818,7 @@ func testACNPIngressRuleDenyCGWithXBtoYA(t *testing.T) { reachability, []metav1.Object{cgBuilder.Get(), builder.Get()}, []int32{81}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -837,7 +837,7 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-deny-cg-with-a-to-z"). 
SetPriority(1.0) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, []ACNPAppliedToSpec{{Group: cgName}}, crdv1alpha1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -852,7 +852,7 @@ func testACNPAppliedToRuleCGWithPodsAToNsZ(t *testing.T) { // Note in this testcase the ClusterGroup is created after the ACNP []metav1.Object{builder.Get(), cgBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -872,7 +872,7 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) @@ -887,7 +887,7 @@ func testACNPEgressRulePodsAToCGWithNsZ(t *testing.T) { // Note in this testcase the ClusterGroup is created after the ACNP []metav1.Object{builder.Get(), cgBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -909,7 +909,7 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { builder = builder.SetName("acnp-deny-cg-with-a-to-z-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -929,7 +929,7 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { reachability, []metav1.Object{cgBuilder.Get(), builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -938,7 +938,7 @@ func testACNPClusterGroupUpdateAppliedTo(t *testing.T) { updatedReachability, []metav1.Object{updatedCgBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -960,7 +960,7 @@ func testACNPClusterGroupUpdate(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-cg-with-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil) reachability := NewReachability(allPods, Connected) @@ -980,7 +980,7 @@ func testACNPClusterGroupUpdate(t *testing.T) { reachability, []metav1.Object{cgBuilder.Get(), builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -989,7 +989,7 @@ func testACNPClusterGroupUpdate(t *testing.T) { updatedReachability, []metav1.Object{updatedCgBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1010,7 +1010,7 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { builder = builder.SetName("acnp-deny-cg-with-zj-to-xj-egress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cgName}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": "x"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "j"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) cp := []*CustomProbe{ { @@ -1032,7 +1032,7 @@ func testACNPClusterGroupAppliedToPodAdd(t *testing.T, data *TestData) { nil, []metav1.Object{cgBuilder.Get(), builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, cp, }, @@ -1058,7 +1058,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { NSSelector: map[string]string{"ns": "x"}, }, }) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgName, "", nil) cp := []*CustomProbe{ { @@ -1081,7 +1081,7 @@ func testACNPClusterGroupRefRulePodAdd(t *testing.T, data *TestData) { // Note in this testcase the ClusterGroup is created after the ACNP []metav1.Object{builder.Get(), cgBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, cp, }, @@ -1135,9 +1135,9 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { NSSelector: map[string]string{"ns": "y"}, }, }) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgv1a3Name, "", nil) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgv1a2Name, "", nil) reachability := NewReachability(allPods, Connected) @@ -1151,7 +1151,7 @@ func testACNPClusterGroupRefRuleIPBlocks(t *testing.T) { reachability, []metav1.Object{builder.Get(), cgBuilder.Get(), cgBuilder2.Get()}, 
[]int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1175,7 +1175,7 @@ func testBaselineNamespaceIsolation(t *testing.T) { SetTier("baseline"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, []metav1.LabelSelectorRequirement{nsExpOtherThanX}, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) @@ -1205,7 +1205,7 @@ func testBaselineNamespaceIsolation(t *testing.T) { reachability, []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1227,7 +1227,7 @@ func testACNPPriorityOverride(t *testing.T) { SetPriority(1.001). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) // Highest priority. Drops traffic from z/b to x/a. - builder1.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -1235,7 +1235,7 @@ func testACNPPriorityOverride(t *testing.T) { SetPriority(1.002). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) // Medium priority. Allows traffic from z to x/a. - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} @@ -1243,7 +1243,7 @@ func testACNPPriorityOverride(t *testing.T) { SetPriority(1.003). 
SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) // Lowest priority. Drops traffic from z to x. - builder3.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) @@ -1269,7 +1269,7 @@ func testACNPPriorityOverride(t *testing.T) { reachabilityTwoACNPs, []metav1.Object{builder3.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1281,7 +1281,7 @@ func testACNPPriorityOverride(t *testing.T) { reachabilityAllACNPs, []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1302,7 +1302,7 @@ func testACNPTierOverride(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) // Highest priority tier. Drops traffic from z/b to x/a. - builder1.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -1311,7 +1311,7 @@ func testACNPTierOverride(t *testing.T) { SetPriority(10). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) // Medium priority tier. Allows traffic from z to x/a. 
- builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) builder3 := &ClusterNetworkPolicySpecBuilder{} @@ -1320,7 +1320,7 @@ func testACNPTierOverride(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) // Lowest priority tier. Drops traffic from z to x. - builder3.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder3.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) @@ -1346,7 +1346,7 @@ func testACNPTierOverride(t *testing.T) { reachabilityTwoACNPs, []metav1.Object{builder3.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1357,7 +1357,7 @@ func testACNPTierOverride(t *testing.T) { reachabilityAllACNPs, []metav1.Object{builder3.Get(), builder1.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1386,7 +1386,7 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(100). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) // Medium priority tier. Allows traffic from z to x/a. - builder1.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -1395,7 +1395,7 @@ func testACNPCustomTiers(t *testing.T) { SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) // Lowest priority tier. 
Drops traffic from z to x. - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachabilityTwoACNPs := NewReachability(allPods, Connected) @@ -1411,7 +1411,7 @@ func testACNPCustomTiers(t *testing.T) { reachabilityTwoACNPs, []metav1.Object{builder2.Get(), builder1.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1434,7 +1434,7 @@ func testACNPPriorityConflictingRule(t *testing.T) { builder1 = builder1.SetName("acnp-drop"). SetPriority(1). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder1.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder1.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -1443,7 +1443,7 @@ func testACNPPriorityConflictingRule(t *testing.T) { SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) // The following ingress rule will take no effect as it is exactly the same as ingress rule of cnp-drop, // but cnp-allow has lower priority. - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) reachabilityBothACNP := NewReachability(allPods, Connected) @@ -1456,7 +1456,7 @@ func testACNPPriorityConflictingRule(t *testing.T) { reachabilityBothACNP, []metav1.Object{builder1.Get(), builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1475,10 +1475,10 @@ func testACNPRulePriority(t *testing.T) { builder1 = builder1.SetName("acnp-deny"). 
SetPriority(5). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder1.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "y"}, + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-allow - builder1.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder2 := &ClusterNetworkPolicySpecBuilder{} @@ -1486,10 +1486,10 @@ func testACNPRulePriority(t *testing.T) { builder2 = builder2.SetName("acnp-allow"). SetPriority(5). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}) - builder2.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) // This rule should take no effect as it will be overridden by the first rule of cnp-drop - builder2.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "y"}, + builder2.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "y"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) // Only egress from pods in namespace x to namespace y should be denied @@ -1503,7 +1503,7 @@ func testACNPRulePriority(t *testing.T) { reachabilityBothACNP, []metav1.Object{builder2.Get(), builder1.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1520,7 +1520,7 @@ func testACNPPortRange(t *testing.T) { builder = builder.SetName("acnp-deny-a-to-z-egress-port-range"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(v1.ProtocolTCP, &p8080, nil, &p8085, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "acnp-port-range", nil) reachability := NewReachability(allPods, Connected) @@ -1534,7 +1534,7 @@ func testACNPPortRange(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{8080, 8081, 8082, 8083, 8084, 8085}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1552,7 +1552,7 @@ func testACNPRejectEgress(t *testing.T) { builder = builder.SetName("acnp-reject-a-to-z-egress"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -1566,7 +1566,7 @@ func testACNPRejectEgress(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1578,12 +1578,12 @@ func testACNPRejectEgress(t *testing.T) { } // testACNPRejectIngress tests that an ACNP is able to reject egress traffic from pods labelled A to namespace Z. -func testACNPRejectIngress(t *testing.T, protocol v1.Protocol) { +func testACNPRejectIngress(t *testing.T, protocol AntreaPolicyProtocol) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("acnp-reject-a-from-z-ingress"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(protocol, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddIngress(protocol, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) reachability := NewReachability(allPods, Connected) @@ -1641,9 +1641,9 @@ func testRejectServiceTraffic(t *testing.T, data *TestData) { builder1 = builder1.SetName("acnp-reject-egress-svc-traffic"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "agnhost-client"}}}) - builder1.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"antrea-e2e": "s1"}, nil, + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s1"}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) - builder1.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"antrea-e2e": "s2"}, nil, + builder1.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "s2"}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) acnpEgress := builder1.Get() @@ -1653,7 +1653,7 @@ func testRejectServiceTraffic(t *testing.T, data *TestData) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s:%d", tc.clientPod.PodName(), tc.destAddr, tc.destPort) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -1671,7 +1671,7 @@ func testRejectServiceTraffic(t *testing.T, data *TestData) { builder2 = builder2.SetName("acnp-reject-ingress-svc-traffic"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": "s1"}}, {PodSelector: map[string]string{"antrea-e2e": "s2"}}}) - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": "agnhost-client"}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) acnpIngress := builder2.Get() @@ -1681,7 +1681,7 @@ func testRejectServiceTraffic(t *testing.T, data *TestData) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s:%d", tc.clientPod.PodName(), tc.destAddr, tc.destPort) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -1750,7 +1750,7 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s:%d", tc.clientPod.PodName(), tc.destAddr, tc.destPort) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -1768,9 +1768,9 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) { builder1 := &ClusterNetworkPolicySpecBuilder{} builder1 = builder1.SetName("acnp-reject-ingress-double-dir"). 
SetPriority(1.0) - builder1.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1alpha1.RuleActionReject, "", "", nil) - builder1.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + builder1.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1alpha1.RuleActionReject, "", "", nil) runTestsWithACNP(builder1.Get(), testcases) @@ -1779,9 +1779,9 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) { builder2 := &ClusterNetworkPolicySpecBuilder{} builder2 = builder2.SetName("acnp-reject-egress-double-dir"). SetPriority(1.0) - builder2.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}, crdv1alpha1.RuleActionReject, "", "", nil) - builder2.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, nil, false, []ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}, crdv1alpha1.RuleActionReject, "", "", nil) runTestsWithACNP(builder2.Get(), testcases) @@ -1791,9 +1791,9 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) { builder3 = builder3.SetName("acnp-reject-server-double-dir"). SetPriority(1.0). 
SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - builder3.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + builder3.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) - builder3.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, + builder3.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": clientName}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) runTestsWithACNP(builder3.Get(), testcases) @@ -1803,9 +1803,9 @@ func testRejectNoInfiniteLoop(t *testing.T, data *TestData) { builder4 = builder4.SetName("acnp-reject-client-double-dir"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) - builder4.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + builder4.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) - builder4.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, + builder4.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"app": "nginx"}, nil, nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) runTestsWithACNP(builder4.Get(), testcases) @@ -1817,7 +1817,7 @@ func testANPPortRange(t *testing.T) { builder = builder.SetName("y", "anp-deny-yb-to-xc-egress-port-range"). SetPriority(1.0). 
SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "b"}}}) - builder.AddEgress(v1.ProtocolTCP, &p8080, nil, &p8085, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, + builder.AddEgress(ProtocolTCP, &p8080, nil, &p8085, nil, nil, nil, map[string]string{"pod": "c"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "anp-port-range") reachability := NewReachability(allPods, Connected) @@ -1829,7 +1829,7 @@ func testANPPortRange(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{8080, 8081, 8082, 8083, 8084, 8085}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }) @@ -1847,7 +1847,7 @@ func testANPBasic(t *testing.T) { builder = builder.SetName("y", "np-same-name"). SetPriority(1.0). SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "") reachability := NewReachability(allPods, Connected) @@ -1858,7 +1858,7 @@ func testANPBasic(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1875,7 +1875,7 @@ func testANPBasic(t *testing.T) { reachability, []metav1.Object{builder.Get(), k8sNPBuilder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -1898,12 +1898,12 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { // See https://github.com/antrea-io/antrea/issues/2083. 
if singleRule { builder.SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}, {PodSelector: map[string]string{tempLabel: ""}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionDrop, "") } else { - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}}}, crdv1alpha1.RuleActionDrop, "") - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{tempLabel: ""}}}, crdv1alpha1.RuleActionDrop, "") } @@ -1913,7 +1913,7 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { anp, err := k8sUtils.CreateOrUpdateANP(builder.Get()) failOnError(err, t) failOnError(data.waitForANPRealized(t, anp.Namespace, anp.Name), t) - k8sUtils.Validate(allPods, reachability, []int32{80}, v1.ProtocolTCP) + k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ := reachability.Summary() if wrong != 0 { t.Errorf("failure -- %d wrong results", wrong) @@ -1932,7 +1932,7 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) reachability.Expect(Pod("x/b"), Pod("y/c"), Dropped) time.Sleep(networkPolicyDelay) - k8sUtils.Validate(allPods, reachability, []int32{80}, v1.ProtocolTCP) + k8sUtils.Validate(allPods, 
reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() if wrong != 0 { t.Errorf("failure -- %d wrong results", wrong) @@ -1946,7 +1946,7 @@ func testANPMultipleAppliedTo(t *testing.T, data *TestData, singleRule bool) { reachability = NewReachability(allPods, Connected) reachability.Expect(Pod("x/b"), Pod("y/a"), Dropped) time.Sleep(networkPolicyDelay) - k8sUtils.Validate(allPods, reachability, []int32{80}, v1.ProtocolTCP) + k8sUtils.Validate(allPods, reachability, []int32{80}, ProtocolTCP) _, wrong, _ = reachability.Summary() if wrong != 0 { t.Errorf("failure -- %d wrong results", wrong) @@ -1962,7 +1962,7 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { builder = builder.SetName("test-log-acnp-deny"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) - builder.AddEgress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, map[string]string{"ns": "z"}, + builder.AddEgress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, map[string]string{"ns": "z"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) builder.AddEgressLogging() @@ -1976,7 +1976,7 @@ func testAuditLoggingBasic(t *testing.T, data *TestData) { wg.Add(1) go func() { defer wg.Done() - k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, v1.ProtocolTCP) + k8sUtils.Probe(ns1, pod1, ns2, pod2, p80, ProtocolTCP) }() } oneProbe("x", "a", "z", "a") @@ -2049,9 +2049,9 @@ func testAppliedToPerRule(t *testing.T) { builder = builder.SetName("y", "np1").SetPriority(1.0) anpATGrp1 := ANPAppliedToSpec{PodSelector: map[string]string{"pod": "a"}, PodSelectorMatchExp: nil} anpATGrp2 := ANPAppliedToSpec{PodSelector: map[string]string{"pod": "b"}, PodSelectorMatchExp: nil} - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, 
map[string]string{"ns": "x"}, nil, nil, []ANPAppliedToSpec{anpATGrp1}, crdv1alpha1.RuleActionDrop, "") - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, nil, nil, []ANPAppliedToSpec{anpATGrp2}, crdv1alpha1.RuleActionDrop, "") reachability := NewReachability(allPods, Connected) @@ -2063,7 +2063,7 @@ func testAppliedToPerRule(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -2075,9 +2075,9 @@ func testAppliedToPerRule(t *testing.T) { cnpATGrp2 := ACNPAppliedToSpec{ PodSelector: map[string]string{"pod": "b"}, NSSelector: map[string]string{"ns": "y"}, PodSelectorMatchExp: nil, NSSelectorMatchExp: nil} - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp1}, crdv1alpha1.RuleActionDrop, "", "", nil) - builder2.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, + builder2.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "z"}, nil, nil, false, []ACNPAppliedToSpec{cnpATGrp2}, crdv1alpha1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) @@ -2091,7 +2091,7 @@ func testAppliedToPerRule(t *testing.T) { reachability2, []metav1.Object{builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, }, @@ -2116,7 +2116,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builder := &ClusterNetworkPolicySpecBuilder{} builder = 
builder.SetName("cnp-cg-svc-ref").SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{Group: cg1Name}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cg2Name, "", nil) // Pods backing svc1 (label pod=a) in Namespace x should not allow ingress from Pods backing svc2 (label pod=b) in Namespace y. @@ -2127,7 +2127,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) reachability, []metav1.Object{svc1, svc2, cgBuilder1.Get(), cgBuilder2.Get(), builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2159,7 +2159,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) reachability2, []metav1.Object{svc1Updated, svc3, cgBuilder1.Get(), cgBuilder2Updated.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, cp, } @@ -2167,7 +2167,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) builderUpdated := &ClusterNetworkPolicySpecBuilder{} builderUpdated = builderUpdated.SetName("cnp-cg-svc-ref").SetPriority(1.0) builderUpdated.SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"pod": "a"}, NSSelector: map[string]string{"ns": "x"}}}) - builderUpdated.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "y"}, + builderUpdated.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "y"}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) // Pod x/a should not allow ingress from y/b per the updated ACNP spec. 
@@ -2176,7 +2176,7 @@ func testACNPClusterGroupServiceRefCreateAndUpdate(t *testing.T, data *TestData) reachability, []metav1.Object{builderUpdated.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2208,7 +2208,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { builder := &ClusterNetworkPolicySpecBuilder{} builder = builder.SetName("cnp-nested-cg").SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "z"}}}). - AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, + AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgNestedName, "", nil) // Pods in Namespace z should not allow traffic from Pods backing svc1 (label pod=a) in Namespace x. @@ -2223,7 +2223,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { // Note in this testcase the ClusterGroup is created after the ACNP []metav1.Object{builder.Get(), svc1, cgBuilder1.Get(), cgBuilderNested.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2254,7 +2254,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability2, []metav1.Object{cgBuilder2.Get(), cgBuilderNested.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, cp, } @@ -2270,7 +2270,7 @@ func testACNPNestedClusterGroupCreateAndUpdate(t *testing.T, data *TestData) { reachability3, []metav1.Object{cgBuilder3.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2314,7 +2314,7 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { NSSelector: map[string]string{"ns": "y"}, }, }) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, cgParentName, "", nil) reachability := NewReachability(allPods, Connected) @@ -2325,7 +2325,7 @@ func 
testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { reachability, []metav1.Object{builder.Get(), cgBuilder1.Get(), cgBuilder2.Get(), cgParent.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2345,7 +2345,7 @@ func testACNPNestedIPBlockClusterGroupCreateAndUpdate(t *testing.T) { reachability2, []metav1.Object{cgBuilder3.Get(), updatedCGParent.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2363,9 +2363,9 @@ func testACNPNamespaceIsolation(t *testing.T) { SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) // deny ingress traffic except from own namespace, which is always allowed. - builder.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, nil, crdv1alpha1.RuleActionAllow, "", "", nil) - builder.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) reachability := NewReachability(allPods, Dropped) @@ -2375,7 +2375,7 @@ func testACNPNamespaceIsolation(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2384,9 +2384,9 @@ func testACNPNamespaceIsolation(t *testing.T) { builder2 = builder2.SetName("test-acnp-ns-isolation-applied-to-per-rule"). SetTier("baseline"). 
SetPriority(1.0) - builder2.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, crdv1alpha1.RuleActionAllow, "", "", nil) - builder2.AddEgress(v1.ProtocolTCP, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder2.AddEgress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, false, []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, crdv1alpha1.RuleActionDrop, "", "", nil) reachability2 := NewReachability(allPods, Connected) @@ -2401,7 +2401,7 @@ func testACNPNamespaceIsolation(t *testing.T) { reachability2, []metav1.Object{builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2419,9 +2419,9 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { SetTier("securityops"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) - builder.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, true, nil, crdv1alpha1.RuleActionPass, "", "", nil) - builder.AddIngress(v1.ProtocolTCP, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, + builder.AddIngress(ProtocolTCP, nil, nil, nil, nil, nil, nil, nil, map[string]string{}, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) // deny ingress traffic except from own namespace, which is delegated to Namespace owners (who can create K8s // NetworkPolicies to regulate intra-Namespace traffic) @@ -2432,7 +2432,7 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { reachability, []metav1.Object{builder.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, } @@ -2450,7 +2450,7 @@ func testACNPStrictNamespacesIsolation(t *testing.T) { reachability2, []metav1.Object{builder2.Get()}, []int32{80}, - v1.ProtocolTCP, + ProtocolTCP, 0, nil, 
} @@ -2473,8 +2473,8 @@ func testFQDNPolicy(t *testing.T) { SetTier("application"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{NSSelector: map[string]string{}}}) - builder.AddFQDNRule("*google.com", v1.ProtocolTCP, nil, nil, nil, "r1", nil, crdv1alpha1.RuleActionReject) - builder.AddFQDNRule("wayfair.com", v1.ProtocolTCP, nil, nil, nil, "r2", nil, crdv1alpha1.RuleActionDrop) + builder.AddFQDNRule("*google.com", ProtocolTCP, nil, nil, nil, "r1", nil, crdv1alpha1.RuleActionReject) + builder.AddFQDNRule("wayfair.com", ProtocolTCP, nil, nil, nil, "r2", nil, crdv1alpha1.RuleActionDrop) testcases := []podToAddrTestStep{ { @@ -2507,7 +2507,7 @@ func testFQDNPolicy(t *testing.T) { time.Sleep(networkPolicyDelay) for _, tc := range testcases { log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2563,8 +2563,8 @@ func testFQDNPolicyInClusterService(t *testing.T) { SetTier("application"). 
SetPriority(1.0) for idx, service := range services { - builder.AddFQDNRule(svcDNSName(service), v1.ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "y"}, PodSelector: map[string]string{"pod": "b"}}}, crdv1alpha1.RuleActionReject) - builder.AddFQDNRule(svcDNSName(service), v1.ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "z"}, PodSelector: map[string]string{"pod": "c"}}}, crdv1alpha1.RuleActionDrop) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "y"}, PodSelector: map[string]string{"pod": "b"}}}, crdv1alpha1.RuleActionReject) + builder.AddFQDNRule(svcDNSName(service), ProtocolTCP, nil, nil, nil, fmt.Sprintf("r%d", idx*2+1), []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "z"}, PodSelector: map[string]string{"pod": "c"}}}, crdv1alpha1.RuleActionDrop) } acnp := builder.Get() k8sUtils.CreateOrUpdateACNP(acnp) @@ -2597,7 +2597,7 @@ func testFQDNPolicyInClusterService(t *testing.T) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2674,7 +2674,7 @@ func testToServices(t *testing.T) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), 
tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2715,7 +2715,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { builder = builder.SetName("acnp-service-account"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": serverName}}}) - builder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, nil, nil, + builder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, nil, nil, nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", sa) acnp := builder.Get() @@ -2765,7 +2765,7 @@ func testServiceAccountSelector(t *testing.T, data *TestData) { for _, tc := range testcases { log.Tracef("Probing: %s -> %s:%d", tc.clientPod.PodName(), tc.destAddr, tc.destPort) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2784,7 +2784,7 @@ func testACNPNodeSelectorEgress(t *testing.T) { builder = builder.SetName("test-acnp-drop-egress-control-plane"). 
SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} - builder.AddNodeSelectorRule(&nodeSelector, v1.ProtocolTCP, &p6443, "egress-control-plane-drop", + builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p6443, "egress-control-plane-drop", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}, PodSelector: map[string]string{"pod": "a"}}}, crdv1alpha1.RuleActionDrop, true) @@ -2829,7 +2829,7 @@ func testACNPNodeSelectorEgress(t *testing.T) { time.Sleep(networkPolicyDelay) for _, tc := range testcases { log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "pod", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2861,7 +2861,7 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { builder = builder.SetName("test-acnp-drop-ingress-from-control-plane"). 
SetPriority(1.0) nodeSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": controlPlaneNodeName()}} - builder.AddNodeSelectorRule(&nodeSelector, v1.ProtocolTCP, &p80, "ingress-control-plane-drop", + builder.AddNodeSelectorRule(&nodeSelector, ProtocolTCP, &p80, "ingress-control-plane-drop", []ACNPAppliedToSpec{{NSSelector: map[string]string{"ns": "x"}}}, crdv1alpha1.RuleActionDrop, false) @@ -2906,7 +2906,81 @@ func testACNPNodeSelectorIngress(t *testing.T, data *TestData) { time.Sleep(networkPolicyDelay) for _, tc := range testcases { log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) - connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, v1.ProtocolTCP) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolTCP) + if err != nil { + t.Errorf("failure -- could not complete probe: %v", err) + } + if connectivity != tc.expectedConnectivity { + t.Errorf("failure -- wrong results for probe: Source %s/%s --> Dest %s:%d connectivity: %v, expected: %v", + tc.clientPod.Namespace(), tc.clientPod.PodName(), tc.destAddr, tc.destPort, connectivity, tc.expectedConnectivity) + } + } + // cleanup test resources + failOnError(k8sUtils.DeleteACNP(builder.Name), t) + failOnError(waitForResourceDelete("", builder.Name, resourceACNP, timeout), t) + time.Sleep(networkPolicyDelay) +} + +func testACNPICMPSupport(t *testing.T, data *TestData) { + clientName, _, cleanupFunc := createAndWaitForPod(t, data, data.createNetshootPodOnNode, "client", nodeName(1), testNamespace, false) + defer cleanupFunc() + + server0Name, server0IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server0", nodeName(0), testNamespace, false) + defer cleanupFunc() + + server1Name, server1IP, cleanupFunc := createAndWaitForPod(t, data, data.createNginxPodOnNode, "server1", 
nodeName(1), testNamespace, false) + defer cleanupFunc() + + icmpType := int32(8) + icmpCode := int32(0) + builder := &ClusterNetworkPolicySpecBuilder{} + builder = builder.SetName("test-acnp-icmp"). + SetPriority(1.0).SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": clientName}}}) + builder.AddEgress(ProtocolICMP, nil, nil, nil, &icmpType, &icmpCode, nil, map[string]string{"antrea-e2e": server0Name}, nil, + nil, nil, false, nil, crdv1alpha1.RuleActionReject, "", "", nil) + builder.AddEgress(ProtocolICMP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": server1Name}, nil, + nil, nil, false, nil, crdv1alpha1.RuleActionDrop, "", "", nil) + + testcases := []podToAddrTestStep{} + if clusterInfo.podV4NetworkCIDR != "" { + testcases = append(testcases, []podToAddrTestStep{ + { + Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + server0IP.ipv4.String(), + -1, + Rejected, + }, + { + Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + server1IP.ipv4.String(), + -1, + Dropped, + }, + }...) + } + if clusterInfo.podV6NetworkCIDR != "" { + testcases = append(testcases, []podToAddrTestStep{ + { + Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + server0IP.ipv6.String(), + -1, + Rejected, + }, + { + Pod(fmt.Sprintf("%s/%s", testNamespace, clientName)), + server1IP.ipv6.String(), + -1, + Dropped, + }, + }...) + } + + _, err := k8sUtils.CreateOrUpdateACNP(builder.Get()) + failOnError(err, t) + time.Sleep(networkPolicyDelay) + for _, tc := range testcases { + log.Tracef("Probing: %s -> %s", tc.clientPod.PodName(), tc.destAddr) + connectivity, err := k8sUtils.ProbeAddr(tc.clientPod.Namespace(), "antrea-e2e", tc.clientPod.PodName(), tc.destAddr, tc.destPort, ProtocolICMP) if err != nil { t.Errorf("failure -- could not complete probe: %v", err) } @@ -2961,7 +3035,7 @@ func executeTestsWithData(t *testing.T, testList []*TestCase, data *TestData) { allTestList = append(allTestList, testList...) 
} -func doProbe(t *testing.T, data *TestData, p *CustomProbe, protocol v1.Protocol) { +func doProbe(t *testing.T, data *TestData, p *CustomProbe, protocol AntreaPolicyProtocol) { // Bootstrap Pods _, _, srcPodCleanupFunc := createAndWaitForPodWithLabels(t, data, data.createServerPodWithLabels, p.SourcePod.Pod.PodName(), p.SourcePod.Pod.Namespace(), p.Port, p.SourcePod.Labels) defer srcPodCleanupFunc() @@ -3210,17 +3284,17 @@ func TestAntreaPolicy(t *testing.T) { t.Run("TestGroupNoK8sNP", func(t *testing.T) { // testcases below do not depend on underlying default-deny K8s NetworkPolicies. - t.Run("Case=ACNPAllowNoDefaultIsolationTCP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, v1.ProtocolTCP) }) - t.Run("Case=ACNPAllowNoDefaultIsolationUDP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, v1.ProtocolUDP) }) - t.Run("Case=ACNPAllowNoDefaultIsolationSCTP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, v1.ProtocolSCTP) }) - t.Run("Case=ACNPDropEgress", func(t *testing.T) { testACNPDropEgress(t, v1.ProtocolTCP) }) - t.Run("Case=ACNPDropEgressUDP", func(t *testing.T) { testACNPDropEgress(t, v1.ProtocolUDP) }) - t.Run("Case=ACNPDropEgressSCTP", func(t *testing.T) { testACNPDropEgress(t, v1.ProtocolSCTP) }) + t.Run("Case=ACNPAllowNoDefaultIsolationTCP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, ProtocolTCP) }) + t.Run("Case=ACNPAllowNoDefaultIsolationUDP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, ProtocolUDP) }) + t.Run("Case=ACNPAllowNoDefaultIsolationSCTP", func(t *testing.T) { testACNPAllowNoDefaultIsolation(t, ProtocolSCTP) }) + t.Run("Case=ACNPDropEgress", func(t *testing.T) { testACNPDropEgress(t, ProtocolTCP) }) + t.Run("Case=ACNPDropEgressUDP", func(t *testing.T) { testACNPDropEgress(t, ProtocolUDP) }) + t.Run("Case=ACNPDropEgressSCTP", func(t *testing.T) { testACNPDropEgress(t, ProtocolSCTP) }) t.Run("Case=ACNPDropIngressInNamespace", func(t *testing.T) { testACNPDropIngressInSelectedNamespace(t) }) 
t.Run("Case=ACNPPortRange", func(t *testing.T) { testACNPPortRange(t) }) t.Run("Case=ACNPRejectEgress", func(t *testing.T) { testACNPRejectEgress(t) }) - t.Run("Case=ACNPRejectIngress", func(t *testing.T) { testACNPRejectIngress(t, v1.ProtocolTCP) }) - t.Run("Case=ACNPRejectIngressUDP", func(t *testing.T) { testACNPRejectIngress(t, v1.ProtocolUDP) }) + t.Run("Case=ACNPRejectIngress", func(t *testing.T) { testACNPRejectIngress(t, ProtocolTCP) }) + t.Run("Case=ACNPRejectIngressUDP", func(t *testing.T) { testACNPRejectIngress(t, ProtocolUDP) }) t.Run("Case=RejectServiceTraffic", func(t *testing.T) { testRejectServiceTraffic(t, data) }) t.Run("Case=RejectNoInfiniteLoop", func(t *testing.T) { testRejectNoInfiniteLoop(t, data) }) t.Run("Case=ACNPNoEffectOnOtherProtocols", func(t *testing.T) { testACNPNoEffectOnOtherProtocols(t) }) @@ -3255,6 +3329,7 @@ func TestAntreaPolicy(t *testing.T) { t.Run("Case=ACNPServiceAccountSelector", func(t *testing.T) { testServiceAccountSelector(t, data) }) t.Run("Case=ACNPNodeSelectorEgress", func(t *testing.T) { testACNPNodeSelectorEgress(t) }) t.Run("Case=ACNPNodeSelectorIngress", func(t *testing.T) { testACNPNodeSelectorIngress(t, data) }) + t.Run("Case=ACNPICMPSupport", func(t *testing.T) { testACNPICMPSupport(t, data) }) }) // print results for reachability tests printResults() @@ -3284,7 +3359,7 @@ func TestAntreaPolicyStatus(t *testing.T) { anpBuilder = anpBuilder.SetName(testNamespace, "anp-applied-to-two-nodes"). SetPriority(1.0). 
SetAppliedToGroup([]ANPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - anpBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, nil, crdv1alpha1.RuleActionAllow, "") anp := anpBuilder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -3296,7 +3371,7 @@ func TestAntreaPolicyStatus(t *testing.T) { acnpBuilder = acnpBuilder.SetName("acnp-applied-to-two-nodes"). SetPriority(1.0). SetAppliedToGroup([]ACNPAppliedToSpec{{PodSelector: map[string]string{"app": "nginx"}}}) - acnpBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + acnpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, false, nil, crdv1alpha1.RuleActionAllow, "", "", nil) acnp := acnpBuilder.Get() log.Debugf("creating ACNP %v", acnp.Name) @@ -3332,9 +3407,9 @@ func TestAntreaPolicyStatusWithAppliedToPerRule(t *testing.T) { anpBuilder := &AntreaNetworkPolicySpecBuilder{} anpBuilder = anpBuilder.SetName(testNamespace, "anp-applied-to-per-rule"). 
SetPriority(1.0) - anpBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server0Name}}}, crdv1alpha1.RuleActionAllow, "") - anpBuilder.AddIngress(v1.ProtocolTCP, &p80, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, + anpBuilder.AddIngress(ProtocolTCP, &p80, nil, nil, nil, nil, nil, map[string]string{"pod": "b"}, map[string]string{"ns": "x"}, nil, nil, []ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": server1Name}}}, crdv1alpha1.RuleActionAllow, "") anp := anpBuilder.Get() log.Debugf("creating ANP %v", anp.Name) @@ -3439,7 +3514,7 @@ func testANPNetworkPolicyStatsWithDropAction(t *testing.T, data *TestData) { allowAction := crdv1alpha1.RuleActionAllow selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"antrea-e2e": clientName}} selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"antrea-e2e": serverName}} - protocol := v1.ProtocolUDP + protocol, _ := AntreaPolicyProtocolToK8sProtocol(ProtocolUDP) // When using the userspace OVS datapath and tunneling, // the first IP packet sent on a tunnel is always dropped because of a missing ARP entry. @@ -3574,7 +3649,7 @@ func testAntreaClusterNetworkPolicyStats(t *testing.T, data *TestData) { allowAction := crdv1alpha1.RuleActionAllow selectorB := metav1.LabelSelector{MatchLabels: map[string]string{"antrea-e2e": clientName}} selectorC := metav1.LabelSelector{MatchLabels: map[string]string{"antrea-e2e": serverName}} - protocol := v1.ProtocolUDP + protocol, _ := AntreaPolicyProtocolToK8sProtocol(ProtocolUDP) // When using the userspace OVS datapath and tunneling, // the first IP packet sent on a tunnel is always dropped because of a missing ARP entry. 
diff --git a/test/e2e/flowaggregator_test.go b/test/e2e/flowaggregator_test.go index 950015c6d66..c9cac2d240d 100644 --- a/test/e2e/flowaggregator_test.go +++ b/test/e2e/flowaggregator_test.go @@ -930,7 +930,7 @@ func deployAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod st builder1 = builder1.SetName(testNamespace, ingressAntreaNetworkPolicyName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": dstPod}}}) - builder1 = builder1.AddIngress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, + builder1 = builder1.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionAllow, testIngressRuleName) anp1 = builder1.Get() anp1, err1 := k8sUtils.CreateOrUpdateANP(anp1) @@ -943,7 +943,7 @@ func deployAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, dstPod st builder2 = builder2.SetName(testNamespace, egressAntreaNetworkPolicyName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) - builder2 = builder2.AddEgress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": dstPod}, map[string]string{}, + builder2 = builder2.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": dstPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionAllow, testEgressRuleName) anp2 = builder2.Get() anp2, err2 := k8sUtils.CreateOrUpdateANP(anp2) @@ -968,24 +968,24 @@ func deployDenyAntreaNetworkPolicies(t *testing.T, data *TestData, srcPod, podRe builder1 = builder1.SetName(testNamespace, ingressRejectANPName). SetPriority(2.0). 
SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": podReject}}}) - builder1 = builder1.AddIngress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, + builder1 = builder1.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionReject, testIngressRuleName) builder2 = builder2.SetName(testNamespace, ingressDropANPName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": podDrop}}}) - builder2 = builder2.AddIngress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, + builder2 = builder2.AddIngress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": srcPod}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionDrop, testIngressRuleName) } else { // apply reject and drop egress rule to source pod builder1 = builder1.SetName(testNamespace, egressRejectANPName). SetPriority(2.0). SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) - builder1 = builder1.AddEgress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": podReject}, map[string]string{}, + builder1 = builder1.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": podReject}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionReject, testEgressRuleName) builder2 = builder2.SetName(testNamespace, egressDropANPName). SetPriority(2.0). 
SetAppliedToGroup([]utils.ANPAppliedToSpec{{PodSelector: map[string]string{"antrea-e2e": srcPod}}}) - builder2 = builder2.AddEgress(corev1.ProtocolTCP, nil, nil, nil, nil, map[string]string{"antrea-e2e": podDrop}, map[string]string{}, + builder2 = builder2.AddEgress(utils.ProtocolTCP, nil, nil, nil, nil, nil, nil, map[string]string{"antrea-e2e": podDrop}, map[string]string{}, nil, nil, nil, secv1alpha1.RuleActionDrop, testEgressRuleName) } anp1 = builder1.Get() diff --git a/test/e2e/framework.go b/test/e2e/framework.go index 172f0b84edc..b525ddc0288 100644 --- a/test/e2e/framework.go +++ b/test/e2e/framework.go @@ -1085,9 +1085,9 @@ func (data *TestData) createMcJoinPodOnNode(name string, ns string, nodeName str // createNetshootPodOnNode creates a Pod in the test namespace with a single netshoot container. The // Pod will be scheduled on the specified Node (if nodeName is not empty). -func (data *TestData) createNetshootPodOnNode(name string, ns string, nodeName string, _ bool) error { +func (data *TestData) createNetshootPodOnNode(name string, ns string, nodeName string, hostNetwork bool) error { sleepDuration := 3600 // seconds - return data.createPodOnNode(name, ns, nodeName, netshootImage, []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, true, nil) + return data.createPodOnNode(name, ns, nodeName, netshootImage, []string{"sleep", strconv.Itoa(sleepDuration)}, nil, nil, nil, hostNetwork, nil) } // createNginxPodOnNode creates a Pod in the test namespace with a single nginx container. 
The diff --git a/test/e2e/k8s_util.go b/test/e2e/k8s_util.go index 99afdcd60d6..bcf581c7b66 100644 --- a/test/e2e/k8s_util.go +++ b/test/e2e/k8s_util.go @@ -32,6 +32,7 @@ import ( crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" crdv1alpha2 "antrea.io/antrea/pkg/apis/crd/v1alpha2" crdv1alpha3 "antrea.io/antrea/pkg/apis/crd/v1alpha3" + "antrea.io/antrea/test/e2e/utils" ) type KubernetesUtils struct { @@ -60,7 +61,7 @@ type TestStep struct { Reachability *Reachability TestResources []metav1.Object Ports []int32 - Protocol v1.Protocol + Protocol utils.AntreaPolicyProtocol Duration time.Duration CustomProbes []*CustomProbe } @@ -120,12 +121,12 @@ func (k *KubernetesUtils) probe( dstAddr string, dstName string, port int32, - protocol v1.Protocol, + protocol utils.AntreaPolicyProtocol, ) PodConnectivityMark { - protocolStr := map[v1.Protocol]string{ - v1.ProtocolTCP: "tcp", - v1.ProtocolUDP: "udp", - v1.ProtocolSCTP: "sctp", + protocolStr := map[utils.AntreaPolicyProtocol]string{ + utils.ProtocolTCP: "tcp", + utils.ProtocolUDP: "udp", + utils.ProtocolSCTP: "sctp", } // We try to connect 3 times. 
This dates back to when we were using the OVS netdev datapath // for Kind clusters, as the first packet sent on a tunnel was always dropped @@ -178,11 +179,77 @@ func DecideProbeResult(stderr string, probeNum int) PodConnectivityMark { return Error } +func (k *KubernetesUtils) pingProbe( + pod *v1.Pod, + podName string, + containerName string, + dstAddr string, + dstName string, +) PodConnectivityMark { + pingCmd := fmt.Sprintf("ping -4 -c 3 -W 1 %s", dstAddr) + if strings.Contains(dstAddr, ":") { + pingCmd = fmt.Sprintf("ping -6 -c 3 -W 1 %s", dstAddr) + } + cmd := []string{ + "/bin/sh", + "-c", + pingCmd, + } + log.Tracef("Running: kubectl exec %s -c %s -n %s -- %s", pod.Name, containerName, pod.Namespace, strings.Join(cmd, " ")) + stdout, stderr, err := k.RunCommandFromPod(pod.Namespace, pod.Name, containerName, cmd) + log.Tracef("%s -> %s: error when running command: err - %v /// stdout - %s /// stderr - %s", podName, dstName, err, stdout, stderr) + return DecidePingProbeResult(stdout, 3) +} + +// DecidePingProbeResult uses the pingProbe stdout to decide the connectivity. +func DecidePingProbeResult(stdout string, probeNum int) PodConnectivityMark { + // Provide stdout example for different connectivity: + // ================== Connected stdout ================== + // PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data. + // 64 bytes from 10.10.1.2: icmp_seq=1 ttl=64 time=0.695 ms + // 64 bytes from 10.10.1.2: icmp_seq=2 ttl=64 time=0.250 ms + // 64 bytes from 10.10.1.2: icmp_seq=3 ttl=64 time=0.058 ms + // + // --- 10.10.1.2 ping statistics --- + // 3 packets transmitted, 3 received, 0% packet loss, time 2043ms + // rtt min/avg/max/mdev = 0.058/0.334/0.695/0.266 ms + // ====================================================== + // =================== Dropped stdout =================== + // PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data. 
+ // + // --- 10.10.1.2 ping statistics --- + // 3 packets transmitted, 0 received, 100% packet loss, time 2037ms + // ======================================================= + // =================== Rejected stdout =================== + // PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data. + // From 10.10.1.2 icmp_seq=1 Destination Host Prohibited + // From 10.10.1.2 icmp_seq=2 Destination Host Prohibited + // From 10.10.1.2 icmp_seq=3 Destination Host Prohibited + // + // --- 10.10.1.2 ping statistics --- + // 3 packets transmitted, 0 received, +3 errors, 100% packet loss, time 2042ms + // ======================================================= + countConnected := strings.Count(stdout, "bytes from") + countRejected := strings.Count(stdout, "Prohibited") + countDropped := probeNum - strings.Count(stdout, "icmp_seq") + + if countRejected == 0 && countConnected > 0 { + return Connected + } + if countConnected == 0 && countRejected > 0 { + return Rejected + } + if countDropped == probeNum { + return Dropped + } + return Error +} + // Probe execs into a Pod and checks its connectivity to another Pod. Of course it // assumes that the target Pod is serving on the input port, and also that agnhost // is installed. The connectivity from source Pod to all IPs of the target Pod // should be consistent. Otherwise, Error PodConnectivityMark will be returned. -func (k *KubernetesUtils) Probe(ns1, pod1, ns2, pod2 string, port int32, protocol v1.Protocol) (PodConnectivityMark, error) { +func (k *KubernetesUtils) Probe(ns1, pod1, ns2, pod2 string, port int32, protocol utils.AntreaPolicyProtocol) (PodConnectivityMark, error) { fromPods, err := k.GetPodsByLabel(ns1, "pod", pod1) if err != nil { return Error, fmt.Errorf("unable to get Pods from Namespace %s: %v", ns1, err) @@ -223,7 +290,7 @@ func (k *KubernetesUtils) Probe(ns1, pod1, ns2, pod2 string, port int32, protoco // ProbeAddr execs into a Pod and checks its connectivity to an arbitrary destination // address. 
-func (k *KubernetesUtils) ProbeAddr(ns, podLabelKey, podLabelValue, dstAddr string, port int32, protocol v1.Protocol) (PodConnectivityMark, error) { +func (k *KubernetesUtils) ProbeAddr(ns, podLabelKey, podLabelValue, dstAddr string, port int32, protocol utils.AntreaPolicyProtocol) (PodConnectivityMark, error) { fromPods, err := k.GetPodsByLabel(ns, podLabelKey, podLabelValue) if err != nil { return Error, fmt.Errorf("unable to get Pods from Namespace %s: %v", ns, err) @@ -233,11 +300,16 @@ func (k *KubernetesUtils) ProbeAddr(ns, podLabelKey, podLabelValue, dstAddr stri } fromPod := fromPods[0] containerName := fromPod.Spec.Containers[0].Name - // If it's an IPv6 address, add "[]" around it. - if strings.Contains(dstAddr, ":") { - dstAddr = fmt.Sprintf("[%s]", dstAddr) + var connectivity PodConnectivityMark + if protocol == utils.ProtocolICMP { + connectivity = k.pingProbe(&fromPod, fmt.Sprintf("%s/%s", ns, podLabelValue), containerName, dstAddr, dstAddr) + } else { + // If it's an IPv6 address, add "[]" around it. 
+ if strings.Contains(dstAddr, ":") { + dstAddr = fmt.Sprintf("[%s]", dstAddr) + } + connectivity = k.probe(&fromPod, fmt.Sprintf("%s/%s", ns, podLabelValue), containerName, dstAddr, dstAddr, port, protocol) } - connectivity := k.probe(&fromPod, fmt.Sprintf("%s/%s", ns, podLabelValue), containerName, dstAddr, dstAddr, port, protocol) return connectivity, nil } @@ -807,17 +879,17 @@ func (k *KubernetesUtils) waitForHTTPServers(allPods []Pod) error { serversAreReady := func() bool { reachability := NewReachability(allPods, Connected) - k.Validate(allPods, reachability, []int32{80, 81, 8080, 8081, 8082, 8083, 8084, 8085}, v1.ProtocolTCP) + k.Validate(allPods, reachability, []int32{80, 81, 8080, 8081, 8082, 8083, 8084, 8085}, utils.ProtocolTCP) if _, wrong, _ := reachability.Summary(); wrong != 0 { return false } - k.Validate(allPods, reachability, []int32{80, 81}, v1.ProtocolUDP) + k.Validate(allPods, reachability, []int32{80, 81}, utils.ProtocolUDP) if _, wrong, _ := reachability.Summary(); wrong != 0 { return false } - k.Validate(allPods, reachability, []int32{80, 81}, v1.ProtocolSCTP) + k.Validate(allPods, reachability, []int32{80, 81}, utils.ProtocolSCTP) if _, wrong, _ := reachability.Summary(); wrong != 0 { return false } @@ -834,7 +906,7 @@ func (k *KubernetesUtils) waitForHTTPServers(allPods []Pod) error { return errors.Errorf("after %d tries, HTTP servers are not ready", maxTries) } -func (k *KubernetesUtils) validateOnePort(allPods []Pod, reachability *Reachability, port int32, protocol v1.Protocol) { +func (k *KubernetesUtils) validateOnePort(allPods []Pod, reachability *Reachability, port int32, protocol utils.AntreaPolicyProtocol) { type probeResult struct { podFrom Pod podTo Pod @@ -884,7 +956,7 @@ func (k *KubernetesUtils) validateOnePort(allPods []Pod, reachability *Reachabil // list of ports and a protocol. The connectivity from a Pod to another Pod should // be consistent across all provided ports. 
Otherwise, this connectivity will be // treated as Error. -func (k *KubernetesUtils) Validate(allPods []Pod, reachability *Reachability, ports []int32, protocol v1.Protocol) { +func (k *KubernetesUtils) Validate(allPods []Pod, reachability *Reachability, ports []int32, protocol utils.AntreaPolicyProtocol) { for _, port := range ports { // we do not run all the probes in parallel as we have experienced that on some // machines, this can cause a fraction of the probes to always fail, despite the diff --git a/test/e2e/utils/anpspecbuilder.go b/test/e2e/utils/anpspecbuilder.go index af6279237ea..e9ec42c578c 100644 --- a/test/e2e/utils/anpspecbuilder.go +++ b/test/e2e/utils/anpspecbuilder.go @@ -15,11 +15,8 @@ package utils import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type AntreaNetworkPolicySpecBuilder struct { @@ -87,8 +84,8 @@ func (b *AntreaNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[string } } -func (b *AntreaNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, - port *int32, portName *string, endPort *int32, cidr *string, +func (b *AntreaNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol, + port *int32, portName *string, endPort, icmpType, icmpCode *int32, cidr *string, podSelector map[string]string, nsSelector map[string]string, podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, ruleAppliedToSpecs []ANPAppliedToSpec, action crdv1alpha1.RuleAction, name string) *AntreaNetworkPolicySpecBuilder { @@ -129,36 +126,11 @@ func (b *AntreaNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, IPBlock: ipBlock, }} } - - var ports []crdv1alpha1.NetworkPolicyPort - if port != nil && portName != nil { - panic("specify portname or port, not both") - } - if portName != nil { - ports = 
[]crdv1alpha1.NetworkPolicyPort{ - { - Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName}, - Protocol: &protoc, - }, - } - } - if port != nil || endPort != nil { - var pVal *intstr.IntOrString - if port != nil { - pVal = &intstr.IntOrString{IntVal: *port} - } - ports = []crdv1alpha1.NetworkPolicyPort{ - { - Port: pVal, - EndPort: endPort, - Protocol: &protoc, - }, - } - } - + ports, protocols := GenPortsOrProtocols(protoc, port, portName, endPort, icmpType, icmpCode) newRule := crdv1alpha1.Rule{ From: policyPeer, Ports: ports, + Protocols: protocols, Action: &action, Name: name, AppliedTo: appliedTos, @@ -167,8 +139,8 @@ func (b *AntreaNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, return b } -func (b *AntreaNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol, - port *int32, portName *string, endPort *int32, cidr *string, +func (b *AntreaNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol, + port *int32, portName *string, endPort, icmpType, icmpCode *int32, cidr *string, podSelector map[string]string, nsSelector map[string]string, podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, ruleAppliedToSpecs []ANPAppliedToSpec, action crdv1alpha1.RuleAction, name string) *AntreaNetworkPolicySpecBuilder { @@ -176,7 +148,7 @@ func (b *AntreaNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol, // For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical // With the exception of calling the rule `To` vs. `From`. 
c := &AntreaNetworkPolicySpecBuilder{} - c.AddIngress(protoc, port, portName, endPort, cidr, podSelector, nsSelector, + c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, cidr, podSelector, nsSelector, podSelectorMatchExp, nsSelectorMatchExp, ruleAppliedToSpecs, action, name) theRule := c.Get().Spec.Ingress[0] diff --git a/test/e2e/utils/cnpspecbuilder.go b/test/e2e/utils/cnpspecbuilder.go index 0a1d89ff69a..76f068b5acb 100644 --- a/test/e2e/utils/cnpspecbuilder.go +++ b/test/e2e/utils/cnpspecbuilder.go @@ -15,7 +15,6 @@ package utils import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -104,8 +103,8 @@ func (b *ClusterNetworkPolicySpecBuilder) GetAppliedToPeer(podSelector map[strin return peer } -func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, - port *int32, portName *string, endPort *int32, cidr *string, +func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc AntreaPolicyProtocol, + port *int32, portName *string, endPort, icmpType, icmpCode *int32, cidr *string, podSelector map[string]string, nsSelector map[string]string, podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1alpha1.NamespacedName) *ClusterNetworkPolicySpecBuilder { @@ -158,35 +157,11 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, ServiceAccount: serviceAccount, }} } - - var ports []crdv1alpha1.NetworkPolicyPort - if port != nil && portName != nil { - panic("specify portname or port, not both") - } - if portName != nil { - ports = []crdv1alpha1.NetworkPolicyPort{ - { - Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName}, - Protocol: &protoc, - }, - } - } - if port != nil || endPort != nil { - var pVal *intstr.IntOrString - if port != nil { - pVal = 
&intstr.IntOrString{IntVal: *port} - } - ports = []crdv1alpha1.NetworkPolicyPort{ - { - Port: pVal, - EndPort: endPort, - Protocol: &protoc, - }, - } - } + ports, protocols := GenPortsOrProtocols(protoc, port, portName, endPort, icmpType, icmpCode) newRule := crdv1alpha1.Rule{ From: policyPeer, Ports: ports, + Protocols: protocols, Action: &action, Name: name, AppliedTo: appliedTos, @@ -195,8 +170,8 @@ func (b *ClusterNetworkPolicySpecBuilder) AddIngress(protoc v1.Protocol, return b } -func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol, - port *int32, portName *string, endPort *int32, cidr *string, +func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc AntreaPolicyProtocol, + port *int32, portName *string, endPort, icmpType, icmpCode *int32, cidr *string, podSelector map[string]string, nsSelector map[string]string, podSelectorMatchExp []metav1.LabelSelectorRequirement, nsSelectorMatchExp []metav1.LabelSelectorRequirement, selfNS bool, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, ruleClusterGroup, name string, serviceAccount *crdv1alpha1.NamespacedName) *ClusterNetworkPolicySpecBuilder { @@ -204,7 +179,7 @@ func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol, // For simplicity, we just reuse the Ingress code here. The underlying data model for ingress/egress is identical // With the exception of calling the rule `To` vs. `From`. 
c := &ClusterNetworkPolicySpecBuilder{} - c.AddIngress(protoc, port, portName, endPort, cidr, podSelector, nsSelector, + c.AddIngress(protoc, port, portName, endPort, icmpType, icmpCode, cidr, podSelector, nsSelector, podSelectorMatchExp, nsSelectorMatchExp, selfNS, ruleAppliedToSpecs, action, ruleClusterGroup, name, serviceAccount) theRule := c.Get().Spec.Ingress[0] @@ -218,17 +193,17 @@ func (b *ClusterNetworkPolicySpecBuilder) AddEgress(protoc v1.Protocol, return b } -func (b *ClusterNetworkPolicySpecBuilder) AddNodeSelectorRule(nodeSelector *metav1.LabelSelector, protoc v1.Protocol, port *int32, name string, +func (b *ClusterNetworkPolicySpecBuilder) AddNodeSelectorRule(nodeSelector *metav1.LabelSelector, protoc AntreaPolicyProtocol, port *int32, name string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction, isEgress bool) *ClusterNetworkPolicySpecBuilder { var appliedTos []crdv1alpha1.NetworkPolicyPeer for _, at := range ruleAppliedToSpecs { appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group)) } policyPeer := []crdv1alpha1.NetworkPolicyPeer{{NodeSelector: nodeSelector}} - + k8sProtocol, _ := AntreaPolicyProtocolToK8sProtocol(protoc) newRule := crdv1alpha1.Rule{ Ports: []crdv1alpha1.NetworkPolicyPort{ - {Protocol: &protoc, Port: &intstr.IntOrString{IntVal: *port}}, + {Protocol: &k8sProtocol, Port: &intstr.IntOrString{IntVal: *port}}, }, Action: &action, Name: name, @@ -245,35 +220,14 @@ func (b *ClusterNetworkPolicySpecBuilder) AddNodeSelectorRule(nodeSelector *meta } func (b *ClusterNetworkPolicySpecBuilder) AddFQDNRule(fqdn string, - protoc v1.Protocol, port *int32, portName *string, endPort *int32, name string, + protoc AntreaPolicyProtocol, port *int32, portName *string, endPort *int32, name string, ruleAppliedToSpecs []ACNPAppliedToSpec, action crdv1alpha1.RuleAction) *ClusterNetworkPolicySpecBuilder { var appliedTos []crdv1alpha1.NetworkPolicyPeer 
for _, at := range ruleAppliedToSpecs { appliedTos = append(appliedTos, b.GetAppliedToPeer(at.PodSelector, at.NSSelector, at.PodSelectorMatchExp, at.NSSelectorMatchExp, at.Group)) } policyPeer := []crdv1alpha1.NetworkPolicyPeer{{FQDN: fqdn}} - var ports []crdv1alpha1.NetworkPolicyPort - if portName != nil { - ports = []crdv1alpha1.NetworkPolicyPort{ - { - Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName}, - Protocol: &protoc, - }, - } - } - if port != nil || endPort != nil { - var pVal *intstr.IntOrString - if port != nil { - pVal = &intstr.IntOrString{IntVal: *port} - } - ports = []crdv1alpha1.NetworkPolicyPort{ - { - Port: pVal, - EndPort: endPort, - Protocol: &protoc, - }, - } - } + ports, _ := GenPortsOrProtocols(protoc, port, portName, endPort, nil, nil) newRule := crdv1alpha1.Rule{ To: policyPeer, Ports: ports, @@ -304,7 +258,7 @@ func (b *ClusterNetworkPolicySpecBuilder) AddToServicesRule(svcRefs []crdv1alpha // AddEgressDNS mutates the nth policy rule to allow DNS, convenience method func (b *ClusterNetworkPolicySpecBuilder) WithEgressDNS() *ClusterNetworkPolicySpecBuilder { - protocolUDP := v1.ProtocolUDP + protocolUDP, _ := AntreaPolicyProtocolToK8sProtocol(ProtocolUDP) route53 := crdv1alpha1.NetworkPolicyPort{ Protocol: &protocolUDP, Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}, diff --git a/test/e2e/utils/helper.go b/test/e2e/utils/helper.go new file mode 100644 index 00000000000..48694ebde8b --- /dev/null +++ b/test/e2e/utils/helper.go @@ -0,0 +1,86 @@ +// Copyright 2022 Antrea Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + crdv1alpha1 "antrea.io/antrea/pkg/apis/crd/v1alpha1" +) + +type AntreaPolicyProtocol string + +const ( + ProtocolTCP AntreaPolicyProtocol = "TCP" + ProtocolUDP AntreaPolicyProtocol = "UDP" + ProtocolSCTP AntreaPolicyProtocol = "SCTP" + ProtocolICMP AntreaPolicyProtocol = "ICMP" +) + +func AntreaPolicyProtocolToK8sProtocol(antreaProtocol AntreaPolicyProtocol) (v1.Protocol, error) { + switch antreaProtocol { + case ProtocolTCP: + return v1.ProtocolTCP, nil + case ProtocolUDP: + return v1.ProtocolUDP, nil + case ProtocolSCTP: + return v1.ProtocolSCTP, nil + default: + return "", fmt.Errorf("k8s doesn't support protocol %s", antreaProtocol) + } +} + +func GenPortsOrProtocols(protoc AntreaPolicyProtocol, port *int32, portName *string, endPort, icmpType, icmpCode *int32) ([]crdv1alpha1.NetworkPolicyPort, []crdv1alpha1.NetworkPolicyProtocol) { + if protoc == ProtocolICMP { + return nil, []crdv1alpha1.NetworkPolicyProtocol{ + { + ICMP: &crdv1alpha1.ICMPProtocol{ + ICMPType: icmpType, + ICMPCode: icmpCode, + }, + }, + } + } + var ports []crdv1alpha1.NetworkPolicyPort + k8sProtocol, _ := AntreaPolicyProtocolToK8sProtocol(protoc) + if port != nil && portName != nil { + panic("specify portname or port, not both") + } + if portName != nil { + ports = []crdv1alpha1.NetworkPolicyPort{ + { + Port: &intstr.IntOrString{Type: intstr.String, StrVal: *portName}, + Protocol: &k8sProtocol, + }, + } + } + if port != nil || endPort != nil { + var pVal 
*intstr.IntOrString + if port != nil { + pVal = &intstr.IntOrString{IntVal: *port} + } + ports = []crdv1alpha1.NetworkPolicyPort{ + { + Port: pVal, + EndPort: endPort, + Protocol: &k8sProtocol, + }, + } + } + return ports, nil +}