diff --git a/cmd/antrea-agent/agent.go b/cmd/antrea-agent/agent.go index 481dc814317..6f32d8b7e5c 100644 --- a/cmd/antrea-agent/agent.go +++ b/cmd/antrea-agent/agent.go @@ -100,10 +100,16 @@ func run(o *Options) error { } defer ovsdbConnection.Close() + // Create an ifaceStore that caches network interfaces managed by this node. + ifaceStore := interfacestore.NewInterfaceStore() + ovsDatapathType := ovsconfig.OVSDatapathType(o.config.OVSDatapathType) ovsBridgeClient := ovsconfig.NewOVSBridge(o.config.OVSBridge, ovsDatapathType, ovsdbConnection) ovsBridgeMgmtAddr := ofconfig.GetMgmtAddress(o.config.OVSRunDir, o.config.OVSBridge) - ofClient := openflow.NewClient(o.config.OVSBridge, ovsBridgeMgmtAddr, ovsDatapathType, + ofClient := openflow.NewClient(o.config.OVSBridge, + ovsBridgeMgmtAddr, + ovsDatapathType, + ifaceStore, features.DefaultFeatureGate.Enabled(features.AntreaProxy), features.DefaultFeatureGate.Enabled(features.AntreaPolicy), features.DefaultFeatureGate.Enabled(features.Egress), @@ -139,9 +145,6 @@ func run(o *Options) error { return fmt.Errorf("error creating route client: %v", err) } - // Create an ifaceStore that caches network interfaces managed by this node. - ifaceStore := interfacestore.NewInterfaceStore() - // networkReadyCh is used to notify that the Node's network is ready. // Functions that rely on the Node's network should wait for the channel to close. networkReadyCh := make(chan struct{}) diff --git a/pkg/agent/openflow/client.go b/pkg/agent/openflow/client.go index 207b73ea873..1441cb3e81f 100644 --- a/pkg/agent/openflow/client.go +++ b/pkg/agent/openflow/client.go @@ -467,11 +467,15 @@ func (c *client) UninstallNodeFlows(hostname string) error { func (c *client) InstallPodFlows(interfaceName string, podInterfaceIPs []net.IP, podInterfaceMAC net.HardwareAddr, ofPort uint32) error { c.replayMutex.RLock() defer c.replayMutex.RUnlock() - localGatewayMAC := c.nodeConfig.GatewayConfig.MAC + skipHairpin := false + if !c.enableProxy { + // If AntreaProxy is disabled, the flows in l2ForwardingCalcTable should skip hairpinMarkTable. + skipHairpin = true + } flows := []binding.Flow{ c.podClassifierFlow(ofPort, cookie.Pod), - c.l2ForwardCalcFlow(podInterfaceMAC, ofPort, false, cookie.Pod), + c.l2ForwardCalcFlow(podInterfaceMAC, ofPort, skipHairpin, cookie.Pod), } // Add support for IPv4 ARP responder. @@ -566,7 +570,11 @@ func (c *client) InstallEndpointFlows(protocol binding.Protocol, endpoints []pro cacheKey := generateEndpointFlowCacheKey(endpoint.IP(), endpointPort, protocol) flows = append(flows, c.endpointDNATFlow(endpointIP, portVal, protocol)) if endpoint.GetIsLocal() { - flows = append(flows, c.hairpinSNATFlow(endpointIP)) + ofPort, ok := c.ifaceStore.GetInterfaceByIP(endpointIP.String()) + if !ok { + return fmt.Errorf("no OVS port found with IP %s", endpointIP.String()) + } + flows = append(flows, c.hairpinMarkFlow(getIPProtocol(endpointIP), uint32(ofPort.OFPort), false)) } keyToFlows[cacheKey] = flows } @@ -623,26 +631,18 @@ func (c *client) InstallDefaultServiceFlows(nodePortAddressesIPv4, nodePortAddre c.l2ForwardOutputServiceHairpinFlow(), } if c.IsIPv4Enabled() { - flows = append(flows, c.serviceHairpinResponseDNATFlow(binding.ProtocolIP)) flows = append(flows, c.serviceLBBypassFlows(binding.ProtocolIP)...) flows = append(flows, c.l3FwdServiceDefaultFlowsViaGW(binding.ProtocolIP, cookie.Service)...)
if c.proxyAll { - // The output interface of a packet is the same as where it is from, and the action of the packet should be - // IN_PORT, rather than output. When a packet of Service is from Antrea gateway and its Endpoint is on host - // network, it needs hairpin mark (by setting a register, it will be matched at table L2ForwardingOutTable). - flows = append(flows, c.serviceHairpinRegSetFlows(binding.ProtocolIP)) // These flows are used to match the first packet of NodePort. The flows will set a bit of a register to mark // the Service type of the packet as NodePort. The mark will be consumed in table serviceLBTable to match NodePort flows = append(flows, c.serviceClassifierFlows(nodePortAddressesIPv4, binding.ProtocolIP)...) } } if c.IsIPv6Enabled() { - flows = append(flows, c.serviceHairpinResponseDNATFlow(binding.ProtocolIPv6)) flows = append(flows, c.serviceLBBypassFlows(binding.ProtocolIPv6)...) flows = append(flows, c.l3FwdServiceDefaultFlowsViaGW(binding.ProtocolIPv6, cookie.Service)...) if c.proxyAll { - // As IPv4 above. - flows = append(flows, c.serviceHairpinRegSetFlows(binding.ProtocolIPv6)) // As IPv4 above. flows = append(flows, c.serviceClassifierFlows(nodePortAddressesIPv6, binding.ProtocolIPv6)...) } @@ -666,10 +666,14 @@ func (c *client) InstallClusterServiceCIDRFlows(serviceNets []*net.IPNet) error func (c *client) InstallGatewayFlows() error { gatewayConfig := c.nodeConfig.GatewayConfig gatewayIPs := []net.IP{} - + skipHairpin := false + if !c.enableProxy { + // If AntreaProxy is disabled, the flows in l2ForwardingCalcTable should skip hairpinMarkTable. + skipHairpin = true + } flows := []binding.Flow{ c.gatewayClassifierFlow(cookie.Default), - c.l2ForwardCalcFlow(gatewayConfig.MAC, config.HostGatewayOFPort, true, cookie.Default), + c.l2ForwardCalcFlow(gatewayConfig.MAC, config.HostGatewayOFPort, skipHairpin, cookie.Default), } flows = append(flows, c.gatewayIPSpoofGuardFlows(cookie.Default)...) @@ -677,9 +681,15 @@ func (c *client) InstallGatewayFlows() error { if gatewayConfig.IPv4 != nil { gatewayIPs = append(gatewayIPs, gatewayConfig.IPv4) flows = append(flows, c.gatewayARPSpoofGuardFlow(gatewayConfig.IPv4, gatewayConfig.MAC, cookie.Default)) + if c.enableProxy { + flows = append(flows, c.hairpinMarkFlow(binding.ProtocolIP, uint32(config.HostGatewayOFPort), true)) + } } if gatewayConfig.IPv6 != nil { + gatewayIPs = append(gatewayIPs, gatewayConfig.IPv6) + if c.enableProxy { + flows = append(flows, c.hairpinMarkFlow(binding.ProtocolIPv6, uint32(config.HostGatewayOFPort), true)) + } } // Add flow to ensure the liveness check packet could be forwarded correctly.
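
Reviewer note on the client.go changes above: hairpin handling no longer rewrites a local Endpoint packet's source IP to a virtual hairpin address (169.254.169.252 / fc00::aabb:ccdd:eeff). Instead, InstallEndpointFlows resolves the Endpoint's OVS port through the interface store and installs hairpinMarkFlow keyed on that port, so hairpin traffic is detected by the input port equaling the selected output port. A minimal sketch of the lookup, using only the interfacestore calls that appear in this diff; the helper name localEndpointOFPort is hypothetical:

```go
package example

import (
	"fmt"
	"net"

	"antrea.io/antrea/pkg/agent/interfacestore"
)

// localEndpointOFPort resolves a local Service Endpoint IP to the OpenFlow
// port of its Pod interface, mirroring the lookup that InstallEndpointFlows
// now performs before installing hairpinMarkFlow.
func localEndpointOFPort(store interfacestore.InterfaceStore, endpointIP net.IP) (uint32, error) {
	iface, ok := store.GetInterfaceByIP(endpointIP.String())
	if !ok {
		// Same failure mode as InstallEndpointFlows: a local Endpoint whose
		// IP is unknown to the interface store cannot be hairpin-marked.
		return 0, fmt.Errorf("no OVS port found with IP %s", endpointIP)
	}
	return uint32(iface.OFPort), nil
}
```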
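
The lookup succeeds only if the interface store already caches the Pod interface; in the agent that cache is populated during initialization, and the TestProxyServiceFlows change later in this diff seeds it by hand before installing Endpoint flows. A sketch of that seeding (all values hypothetical), reusing the helper above:

```go
// exampleHairpinLookup seeds an interface store the same way the updated
// TestProxyServiceFlows does, then resolves the Endpoint IP back to its
// OF port. Expected result here: 5.
func exampleHairpinLookup() (uint32, error) {
	store := interfacestore.NewInterfaceStore()
	store.Initialize([]*interfacestore.InterfaceConfig{{
		Type:          interfacestore.ContainerInterface,
		IPs:           []net.IP{net.ParseIP("10.10.0.3")},
		OVSPortConfig: &interfacestore.OVSPortConfig{OFPort: 5},
		ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{
			ContainerID:  "c0ffee",
			PodName:      "web-0",
			PodNamespace: "default",
		},
	}})
	return localEndpointOFPort(store, net.ParseIP("10.10.0.3"))
}
```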
diff --git a/pkg/agent/openflow/client_test.go b/pkg/agent/openflow/client_test.go index 5a3cecce579..92d238ca6b1 100644 --- a/pkg/agent/openflow/client_test.go +++ b/pkg/agent/openflow/client_test.go @@ -103,7 +103,7 @@ func TestIdempotentFlowInstallation(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() m := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, false, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, false, false, false, false) client := ofClient.(*client) client.cookieAllocator = cookie.NewAllocator(0) client.ofEntryOperations = m @@ -132,7 +132,7 @@ func TestIdempotentFlowInstallation(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() m := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, false, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, false, false, false, false) client := ofClient.(*client) client.cookieAllocator = cookie.NewAllocator(0) client.ofEntryOperations = m @@ -174,7 +174,7 @@ func TestFlowInstallationFailed(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() m := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, false, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, false, false, false, false) client := ofClient.(*client) client.cookieAllocator = cookie.NewAllocator(0) client.ofEntryOperations = m @@ -209,7 +209,7 @@ func TestConcurrentFlowInstallation(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() m := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, false, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, false, false, false, false) client := ofClient.(*client) client.cookieAllocator = cookie.NewAllocator(0) client.ofEntryOperations = m @@ -400,7 +400,7 @@ func Test_client_SendTraceflowPacket(t *testing.T) { } func prepareTraceflowFlow(ctrl *gomock.Controller) *client { - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, true, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, true, false, false, false) c := ofClient.(*client) c.cookieAllocator = cookie.NewAllocator(0) c.nodeConfig = nodeConfig @@ -418,7 +418,7 @@ func prepareTraceflowFlow(ctrl *gomock.Controller) *client { } func prepareSendTraceflowPacket(ctrl *gomock.Controller, success bool) *client { - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, true, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, true, false, false, false) c := ofClient.(*client) c.nodeConfig = nodeConfig m := ovsoftest.NewMockBridge(ctrl) @@ -506,7 +506,7 @@ func Test_client_setBasePacketOutBuilder(t *testing.T) { } func prepareSetBasePacketOutBuilder(ctrl *gomock.Controller, success bool) *client { - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, true, true, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, true, true, 
false, false, false) c := ofClient.(*client) m := ovsoftest.NewMockBridge(ctrl) c.bridge = m diff --git a/pkg/agent/openflow/network_policy_test.go b/pkg/agent/openflow/network_policy_test.go index 4ebe9a4d80d..dc2e6dd8f31 100644 --- a/pkg/agent/openflow/network_policy_test.go +++ b/pkg/agent/openflow/network_policy_test.go @@ -507,7 +507,7 @@ func TestBatchInstallPolicyRuleFlows(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() mockOperations := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, false, true, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, false, true, false, false, false) c = ofClient.(*client) c.cookieAllocator = cookie.NewAllocator(0) c.ofEntryOperations = mockOperations @@ -575,7 +575,8 @@ func BenchmarkBatchInstallPolicyRuleFlows(b *testing.B) { ctrl := gomock.NewController(b) defer ctrl.Finish() mockOperations := oftest.NewMockOFEntryOperations(ctrl) - ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, false, true, false, false, false) + ofClient := NewClient(bridgeName, bridgeMgmtAddr, ovsconfig.OVSDatapathSystem, nil, + false, true, false, false, false) c = ofClient.(*client) c.cookieAllocator = cookie.NewAllocator(0) c.ofEntryOperations = mockOperations diff --git a/pkg/agent/openflow/pipeline.go b/pkg/agent/openflow/pipeline.go index 8e5141bdb76..6db6ec3daa4 100644 --- a/pkg/agent/openflow/pipeline.go +++ b/pkg/agent/openflow/pipeline.go @@ -31,6 +31,7 @@ import ( "k8s.io/klog/v2" "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/metrics" "antrea.io/antrea/pkg/agent/openflow/cookie" "antrea.io/antrea/pkg/agent/types" @@ -48,8 +49,7 @@ const ( spoofGuardTable binding.TableIDType = 10 arpResponderTable binding.TableIDType = 20 ipv6Table binding.TableIDType = 21 - serviceHairpinTable binding.TableIDType = 23 - serviceConntrackTable binding.TableIDType = 24 // serviceConntrackTable use a new ct_zone to transform SNAT'd connections. + snatConntrackTable binding.TableIDType = 24 // snatConntrackTable uses a new ct_zone to transform SNAT'd connections.
conntrackTable binding.TableIDType = 30 conntrackStateTable binding.TableIDType = 31 serviceClassifierTable binding.TableIDType = 35 @@ -66,14 +66,14 @@ const ( snatTable binding.TableIDType = 71 l3DecTTLTable binding.TableIDType = 72 l2ForwardingCalcTable binding.TableIDType = 80 + hairpinMarkTable binding.TableIDType = 81 AntreaPolicyIngressRuleTable binding.TableIDType = 85 DefaultTierIngressRuleTable binding.TableIDType = 89 IngressRuleTable binding.TableIDType = 90 IngressDefaultTable binding.TableIDType = 100 IngressMetricTable binding.TableIDType = 101 conntrackCommitTable binding.TableIDType = 105 - serviceConntrackCommitTable binding.TableIDType = 106 - hairpinSNATTable binding.TableIDType = 108 + snatConntrackCommitTable binding.TableIDType = 106 L2ForwardingOutTable binding.TableIDType = 110 // Flow priority level @@ -133,8 +133,7 @@ var ( {spoofGuardTable, "SpoofGuard"}, {arpResponderTable, "ARPResponder"}, {ipv6Table, "IPv6"}, - {serviceHairpinTable, "ServiceHairpin"}, - {serviceConntrackTable, "ServiceConntrack"}, + {snatConntrackTable, "SNATConntrack"}, {conntrackTable, "ConntrackZone"}, {conntrackStateTable, "ConntrackState"}, {serviceClassifierTable, "ServiceClassifier"}, @@ -150,13 +149,13 @@ var ( {snatTable, "SNAT"}, {l3DecTTLTable, "IPTTLDec"}, {l2ForwardingCalcTable, "L2Forwarding"}, + {hairpinMarkTable, "HairpinMark"}, {AntreaPolicyIngressRuleTable, "AntreaPolicyIngressRule"}, {IngressRuleTable, "IngressRule"}, {IngressDefaultTable, "IngressDefaultRule"}, {IngressMetricTable, "IngressMetric"}, {conntrackCommitTable, "ConntrackCommit"}, - {serviceConntrackCommitTable, "ServiceConntrackCommit"}, - {hairpinSNATTable, "HairpinSNATTable"}, + {snatConntrackCommitTable, "SNATConntrackCommit"}, {L2ForwardingOutTable, "Output"}, } ) @@ -269,8 +268,6 @@ var ( snatPktMarkRange = &binding.Range{0, 7} globalVirtualMAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:ff") - hairpinIP = net.ParseIP("169.254.169.252").To4() - hairpinIPv6 = net.ParseIP("fc00::aabb:ccdd:eeff").To16() ) type OFEntryOperations interface { @@ -348,6 +345,7 @@ type client struct { // For example, if a flow has multiple actions, setting it to true can get consistent flow. // Enabling it may carry a performance impact. It's disabled by default and should only be used in testing. deterministic bool + ifaceStore interfacestore.InterfaceStore } func (c *client) GetTunnelVirtualMAC() net.HardwareAddr { @@ -472,8 +470,8 @@ func (c *client) defaultFlows() (flows []binding.Flow) { // tunnelClassifierFlow generates the flow to mark traffic comes from the tunnelOFPort. func (c *client) tunnelClassifierFlow(tunnelOFPort uint32, category cookie.Category) binding.Flow { nextTable := conntrackTable - if c.proxyAll { - nextTable = serviceConntrackTable + if c.enableProxy { + nextTable = snatConntrackTable } return c.pipeline[ClassifierTable].BuildFlow(priorityNormal). MatchInPort(tunnelOFPort). @@ -520,6 +518,8 @@ func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow { connectionTrackTable := c.pipeline[conntrackTable] connectionTrackStateTable := c.pipeline[conntrackStateTable] connectionTrackCommitTable := c.pipeline[conntrackCommitTable] + snatConnectionTrackTable := c.pipeline[snatConntrackTable] + snatConnectionTrackCommitTable := c.pipeline[snatConntrackCommitTable] flows := c.conntrackBasicFlows(category) if c.enableProxy { // Replace the default flow with multiple resubmits actions. 
@@ -541,12 +541,12 @@ func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow { for _, proto := range c.ipProtocols { gatewayIP := c.nodeConfig.GatewayConfig.IPv4 serviceVirtualIP := config.VirtualServiceIPv4 - snatZone := SNATCtZone + snatCtZone := SNATCtZone ctZone := CtZone if proto == binding.ProtocolIPv6 { gatewayIP = c.nodeConfig.GatewayConfig.IPv6 serviceVirtualIP = config.VirtualServiceIPv6 - snatZone = SNATCtZoneV6 + snatCtZone = SNATCtZoneV6 ctZone = CtZoneV6 } flows = append(flows, @@ -555,33 +555,68 @@ func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow { Action().CT(false, connectionTrackTable.GetNext(), ctZone).NAT().CTDone(). Cookie(c.cookieAllocator.Request(category).Raw()). Done(), - connectionTrackCommitTable.BuildFlow(priorityLow).MatchProtocol(proto). - MatchCTStateTrk(true). + connectionTrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto). MatchCTMark(ServiceCTMark). - MatchRegMark(EpSelectedRegMark). + Action().GotoTable(snatConntrackCommitTable). + Cookie(c.cookieAllocator.Request(category).Raw()). + Done(), + // This flow is used to maintain SNAT conntrack for Service traffic. + snatConnectionTrackTable.BuildFlow(priorityNormal).MatchProtocol(proto). + Action().CT(false, snatConnectionTrackTable.GetNext(), snatCtZone).NAT().CTDone(). Cookie(c.cookieAllocator.Request(category).Raw()). - Action().GotoTable(connectionTrackCommitTable.GetNext()). + Done(), + // This flow is used to match the subsequent request packets of Service traffic whose first request packet has been + // committed and SNAT'd. For example: + /* * 192.168.77.1 is the IP address of the client. + * 192.168.77.100 is the IP address of the k8s node. + * 30001 is a NodePort port. + * 10.10.0.1 is the IP address of the Antrea gateway. + * 10.10.0.3 is the Endpoint of the NodePort Service. + + * pkt 1 (request) + * client 192.168.77.1:12345->192.168.77.100:30001 + * ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001 + * ct zone DNAT 65520 192.168.77.1:12345->192.168.77.100:30001 + * ct commit DNAT zone 65520 192.168.77.1:12345->192.168.77.100:30001 => 192.168.77.1:12345->10.10.0.3:80 + * ct commit SNAT zone 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80 + * output + * pkt 2 (response) + * Pod 10.10.0.3:80->10.10.0.1:12345 + * ct zone SNAT 65521 10.10.0.3:80->10.10.0.1:12345 => 10.10.0.3:80->192.168.77.1:12345 + * ct zone DNAT 65520 10.10.0.3:80->192.168.77.1:12345 => 192.168.77.100:30001->192.168.77.1:12345 + * output + * pkt 3 (request) + * client 192.168.77.1:12345->192.168.77.100:30001 + * ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001 + * ct zone DNAT 65520 192.168.77.1:12345->10.10.0.3:80 + * ct zone SNAT 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80 + * output + * pkt ... + + The source IP address of pkt 3 cannot be transformed through zone 65521 on the first pass, as there is no conntrack entry for + 192.168.77.1:12345<->192.168.77.100:30001, so the source IP is still 192.168.77.1. + Before output, pkt 3 still needs SNAT, but its connection has already been committed. This flow performs the SNAT for pkt 3. */ + snatConnectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto). + Cookie(c.cookieAllocator.Request(category).Raw()). + MatchCTStateNew(false). + MatchCTStateTrk(true). + Action().CT(false, snatConnectionTrackCommitTable.GetNext(), snatCtZone). + NAT(). + CTDone(). + Done(), + snatConnectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto).
+ Cookie(c.cookieAllocator.Request(category).Raw()). + MatchCTStateNew(true). + MatchCTStateTrk(true). + MatchRegMark(HairpinRegMark). + Action().CT(true, L2ForwardingOutTable, snatCtZone). + SNAT(&binding.IPRange{StartIP: gatewayIP, EndIP: gatewayIP}, nil). + CTDone(). Done(), ) if c.proxyAll { - serviceConnectionTrackTable := c.pipeline[serviceConntrackTable] - serviceConnectionTrackCommitTable := c.pipeline[serviceConntrackCommitTable] flows = append(flows, - // This flow is used to match the Service traffic from Antrea gateway. The Service traffic from gateway - // should enter table serviceConntrackCommitTable, otherwise it will be matched by other flows in - // table connectionTrackCommit. - connectionTrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto). - MatchCTMark(ServiceCTMark). - MatchRegMark(FromGatewayRegMark). - Action().GotoTable(serviceConntrackCommitTable). - Cookie(c.cookieAllocator.Request(category).Raw()). - Done(), - // This flow is used to maintain SNAT conntrack for Service traffic. - serviceConnectionTrackTable.BuildFlow(priorityNormal).MatchProtocol(proto). - Action().CT(false, serviceConnectionTrackTable.GetNext(), snatZone).NAT().CTDone(). - Cookie(c.cookieAllocator.Request(category).Raw()). - Done(), // This flow is used to match the following cases: // - The first packet of NodePort/LoadBalancer whose Endpoint is not on local Pod CIDR or any remote // Pod CIDRs. Note that, this flow will change the behavior of the packet that NodePort/LoadBalancer @@ -593,68 +628,29 @@ func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow { // - The first packet of ClusterIP and the Endpoint is not on local Pod CIDR or any remote Pod CIDRs. // As the packet is from Antrea gateway, and it will pass through Antrea gateway, a virtual IP is used // to perform SNAT for the packet, rather than Antrea gateway's IP. - serviceConnectionTrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto). + snatConnectionTrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(proto). MatchRegMark(ToGatewayRegMark). + MatchRegMark(HairpinRegMark). Cookie(c.cookieAllocator.Request(category).Raw()). MatchCTStateNew(true). MatchCTStateTrk(true). - Action().CT(true, serviceConnectionTrackCommitTable.GetNext(), snatZone). + Action().CT(true, snatConnectionTrackCommitTable.GetNext(), snatCtZone). SNAT(&binding.IPRange{StartIP: serviceVirtualIP, EndIP: serviceVirtualIP}, nil). CTDone(). Done(), // This flow is used to match the first packet of NodePort/LoadBalancer whose output port is not // Antrea gateway, and externalTrafficPolicy is Cluster. This packet requires SNAT. Antrea gateway // IP is used to perform SNAT for the packet. - serviceConnectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto). + snatConnectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto). + MatchRegMark(FromGatewayRegMark). MatchRegMark(ServiceNeedSNATRegMark). Cookie(c.cookieAllocator.Request(category).Raw()). MatchCTStateNew(true). MatchCTStateTrk(true). - Action().CT(true, serviceConnectionTrackCommitTable.GetNext(), snatZone). + Action().CT(true, snatConnectionTrackCommitTable.GetNext(), snatCtZone). SNAT(&binding.IPRange{StartIP: gatewayIP, EndIP: gatewayIP}, nil). CTDone(). Done(), - // This flow is used to match the consequent request packets of Service traffic whose first request packet has been committed - // and performed SNAT. For example: - /* - * 192.168.77.1 is the IP address of client. - * 192.168.77.100 is the IP address of k8s node. 
- * 30001 is a NodePort port. - * 10.10.0.1 is the IP address of Antrea gateway. - * 10.10.0.3 is the Endpoint of NodePort Service. - - * pkt 1 (request) - * client 192.168.77.1:12345->192.168.77.100:30001 - * ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001 - * ct zone DNAT 65520 192.168.77.1:12345->192.168.77.100:30001 - * ct commit DNAT zone 65520 192.168.77.1:12345->192.168.77.100:30001 => 192.168.77.1:12345->10.10.0.3:80 - * ct commit SNAT zone 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80 - * output - * pkt 2 (response) - * Pod 10.10.0.3:80->10.10.0.1:12345 - * ct zone SNAT 65521 10.10.0.3:80->10.10.0.1:12345 => 10.10.0.3:80->192.168.77.1:12345 - * ct zone DNAT 65520 10.10.0.3:80->192.168.77.1:12345 => 192.168.77.1:30001->192.168.77.1:12345 - * output - * pkt 3 (request) - * client 192.168.77.1:12345->192.168.77.100:30001 - * ct zone SNAT 65521 192.168.77.1:12345->192.168.77.100:30001 - * ct zone DNAT 65520 192.168.77.1:12345->10.10.0.3:80 - * ct zone SNAT 65521 192.168.77.1:12345->10.10.0.3:80 => 10.10.0.1:12345->10.10.0.3:80 - * output - * pkt ... - - The source IP address of pkt 3 cannot be transformed through zone 65521 as there is no connection track about - 192.168.77.1:12345<->192.168.77.100:30001, and the source IP is still 192.168.77.100. - Before output, pkt 3 needs SNAT, but the connection has been committed. The flow is for pkt 3 to perform SNAT. - */ - serviceConnectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(proto). - Cookie(c.cookieAllocator.Request(category).Raw()). - MatchCTStateNew(false). - MatchCTStateTrk(true). - Action().CT(false, serviceConnectionTrackCommitTable.GetNext(), snatZone). - NAT(). - CTDone(). - Done(), ) } } @@ -700,8 +696,8 @@ func (c *client) conntrackBypassRejectFlow(proto binding.Protocol) binding.Flow // dns response packetout from conntrack, to avoid unexpected packet drop. func (c *client) dnsResponseBypassConntrackFlow() binding.Flow { table := c.pipeline[conntrackTable] - if c.proxyAll { - table = c.pipeline[serviceConntrackTable] + if c.enableProxy { + table = c.pipeline[snatConntrackTable] } return table.BuildFlow(priorityHigh). MatchRegFieldWithValue(CustomReasonField, CustomReasonDNS). @@ -975,12 +971,12 @@ func (c *client) serviceLBBypassFlows(ipProtocol binding.Protocol) []binding.Flo } // l2ForwardCalcFlow generates the flow that matches dst MAC and loads ofPort to reg. -func (c *client) l2ForwardCalcFlow(dstMAC net.HardwareAddr, ofPort uint32, skipIngressRules bool, category cookie.Category) binding.Flow { +func (c *client) l2ForwardCalcFlow(dstMAC net.HardwareAddr, ofPort uint32, skipHairpin bool, category cookie.Category) binding.Flow { l2FwdCalcTable := c.pipeline[l2ForwardingCalcTable] nextTable := l2FwdCalcTable.GetNext() - if !skipIngressRules { - // Go to ingress NetworkPolicy tables for traffic to local Pods. - nextTable = c.ingressEntryTable + if !skipHairpin { + // Go to hairpinMarkTable for traffic to local Pods. + nextTable = hairpinMarkTable } return l2FwdCalcTable.BuildFlow(priorityNormal). MatchDstMAC(dstMAC). @@ -1000,7 +996,7 @@ func (c *client) traceflowL2ForwardOutputFlows(dataplaneTag uint8, liveTraffic, l2FwdOutTable := c.pipeline[L2ForwardingOutTable] for _, ipProtocol := range c.ipProtocols { if c.networkConfig.TrafficEncapMode.SupportsEncap() { - // SendToController and Output if output port is tunnel port. fb1 := l2FwdOutTable.BuildFlow(priorityNormal+3).
MatchRegFieldWithValue(TargetOFPortField, config.DefaultTunOFPort). MatchIPDSCP(dataplaneTag). @@ -1389,40 +1385,6 @@ func getIPProtocol(ip net.IP) binding.Protocol { return ipProtocol } -// serviceHairpinResponseDNATFlow generates the flow which transforms destination -// IP of the hairpin packet to the source IP. -func (c *client) serviceHairpinResponseDNATFlow(ipProtocol binding.Protocol) binding.Flow { - hpIP := hairpinIP - from := binding.NxmFieldSrcIPv4 - to := binding.NxmFieldDstIPv4 - if ipProtocol == binding.ProtocolIPv6 { - hpIP = hairpinIPv6 - from = binding.NxmFieldSrcIPv6 - to = binding.NxmFieldDstIPv6 - } - hairpinTable := c.pipeline[serviceHairpinTable] - return hairpinTable.BuildFlow(priorityNormal).MatchProtocol(ipProtocol). - MatchDstIP(hpIP). - Action().Move(from, to). - Action().LoadRegMark(HairpinRegMark). - Action().GotoTable(hairpinTable.GetNext()). - Cookie(c.cookieAllocator.Request(cookie.Service).Raw()). - Done() -} - -// serviceHairpinRegSetFlows generates the flow to set the hairpin mark for the packet which is from Antrea gateway and -// its output interface is also Antrea gateway. In table L2ForwardingOutTable #110, a packet with hairpin mark will be -// sent out with action IN_PORT, otherwise the packet with action output will be dropped. -func (c *client) serviceHairpinRegSetFlows(ipProtocol binding.Protocol) binding.Flow { - return c.pipeline[hairpinSNATTable].BuildFlow(priorityNormal).MatchProtocol(ipProtocol). - MatchRegMark(FromGatewayRegMark). - MatchRegMark(ToGatewayRegMark). - Action().LoadRegMark(HairpinRegMark). - Action().GotoTable(L2ForwardingOutTable). - Cookie(c.cookieAllocator.Request(cookie.Service).Raw()). - Done() -} - // gatewayARPSpoofGuardFlow generates the flow to check ARP traffic sent out from the local gateway interface. func (c *client) gatewayARPSpoofGuardFlow(gatewayIP net.IP, gatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow { return c.pipeline[spoofGuardTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP). @@ -2336,22 +2298,20 @@ func (c *client) endpointDNATFlow(endpointIP net.IP, endpointPort uint16, protoc Done() } -// hairpinSNATFlow generates the flow which does SNAT for Service -// hairpin packets and loads the hairpin mark to markReg. -func (c *client) hairpinSNATFlow(endpointIP net.IP) binding.Flow { - ipProtocol := getIPProtocol(endpointIP) - hpIP := hairpinIP - if ipProtocol == binding.ProtocolIPv6 { - hpIP = hairpinIPv6 +// hairpinMarkFlow generates the flow which marks Service hairpin packets. +func (c *client) hairpinMarkFlow(ipProtocol binding.Protocol, ofPort uint32, skipIngressRules bool) binding.Flow { + nextTable := IngressMetricTable + if !skipIngressRules { + // Go to ingress NetworkPolicy tables for traffic to local Pods. + nextTable = c.ingressEntryTable } - return c.pipeline[hairpinSNATTable].BuildFlow(priorityNormal). + return c.pipeline[hairpinMarkTable].BuildFlow(priorityNormal). Cookie(c.cookieAllocator.Request(cookie.Service).Raw()). MatchProtocol(ipProtocol). - MatchDstIP(endpointIP). - MatchSrcIP(endpointIP). - Action().SetSrcIP(hpIP). + MatchInPort(ofPort). + MatchRegFieldWithValue(TargetOFPortField, ofPort). Action().LoadRegMark(HairpinRegMark). - Action().GotoTable(L2ForwardingOutTable). + Action().GotoTable(nextTable).
Done() } @@ -2473,22 +2433,19 @@ func (c *client) generatePipeline() { L2ForwardingOutTable: bridge.CreateTable(L2ForwardingOutTable, binding.LastTableID, binding.TableMissActionDrop), } if c.enableProxy { - c.pipeline[spoofGuardTable] = bridge.CreateTable(spoofGuardTable, serviceHairpinTable, binding.TableMissActionDrop) - c.pipeline[ipv6Table] = bridge.CreateTable(ipv6Table, serviceHairpinTable, binding.TableMissActionNext) - if c.proxyAll { - c.pipeline[serviceHairpinTable] = bridge.CreateTable(serviceHairpinTable, serviceConntrackTable, binding.TableMissActionNext) - c.pipeline[serviceConntrackTable] = bridge.CreateTable(serviceConntrackTable, conntrackTable, binding.TableMissActionNext) - c.pipeline[serviceClassifierTable] = bridge.CreateTable(serviceClassifierTable, binding.LastTableID, binding.TableMissActionNone) - c.pipeline[serviceConntrackCommitTable] = bridge.CreateTable(serviceConntrackCommitTable, hairpinSNATTable, binding.TableMissActionNext) - } else { - c.pipeline[serviceHairpinTable] = bridge.CreateTable(serviceHairpinTable, conntrackTable, binding.TableMissActionNext) - } + c.pipeline[spoofGuardTable] = bridge.CreateTable(spoofGuardTable, snatConntrackTable, binding.TableMissActionDrop) + c.pipeline[ipv6Table] = bridge.CreateTable(ipv6Table, snatConntrackTable, binding.TableMissActionNext) + c.pipeline[snatConntrackTable] = bridge.CreateTable(snatConntrackTable, conntrackTable, binding.TableMissActionNext) c.pipeline[conntrackStateTable] = bridge.CreateTable(conntrackStateTable, endpointDNATTable, binding.TableMissActionNext) c.pipeline[sessionAffinityTable] = bridge.CreateTable(sessionAffinityTable, binding.LastTableID, binding.TableMissActionNone) c.pipeline[serviceLBTable] = bridge.CreateTable(serviceLBTable, endpointDNATTable, binding.TableMissActionNext) c.pipeline[endpointDNATTable] = bridge.CreateTable(endpointDNATTable, c.egressEntryTable, binding.TableMissActionNext) - c.pipeline[conntrackCommitTable] = bridge.CreateTable(conntrackCommitTable, hairpinSNATTable, binding.TableMissActionNext) - c.pipeline[hairpinSNATTable] = bridge.CreateTable(hairpinSNATTable, L2ForwardingOutTable, binding.TableMissActionNext) + c.pipeline[hairpinMarkTable] = bridge.CreateTable(hairpinMarkTable, c.ingressEntryTable, binding.TableMissActionNext) + c.pipeline[conntrackCommitTable] = bridge.CreateTable(conntrackCommitTable, L2ForwardingOutTable, binding.TableMissActionNext) + c.pipeline[snatConntrackCommitTable] = bridge.CreateTable(snatConntrackCommitTable, L2ForwardingOutTable, binding.TableMissActionNext) + if c.proxyAll { + c.pipeline[serviceClassifierTable] = bridge.CreateTable(serviceClassifierTable, binding.LastTableID, binding.TableMissActionNone) + } } else { c.pipeline[spoofGuardTable] = bridge.CreateTable(spoofGuardTable, conntrackTable, binding.TableMissActionDrop) c.pipeline[ipv6Table] = bridge.CreateTable(ipv6Table, conntrackTable, binding.TableMissActionNext) @@ -2513,6 +2470,7 @@ func (c *client) generatePipeline() { func NewClient(bridgeName string, mgmtAddr string, ovsDatapathType ovsconfig.OVSDatapathType, + ifaceStore interfacestore.InterfaceStore, enableProxy bool, enableAntreaPolicy bool, enableEgress bool, @@ -2541,6 +2499,7 @@ func NewClient(bridgeName string, ovsctlClient: ovsctl.NewClient(bridgeName), ovsDatapathType: ovsDatapathType, ovsMetersAreSupported: ovsMetersAreSupported(ovsDatapathType), + ifaceStore: ifaceStore, } c.ofEntryOperations = c if enableAntreaPolicy { diff --git a/pkg/agent/openflow/pipeline_windows.go 
b/pkg/agent/openflow/pipeline_windows.go index 5411fa8186e..a3e836c8286 100644 --- a/pkg/agent/openflow/pipeline_windows.go +++ b/pkg/agent/openflow/pipeline_windows.go @@ -102,7 +102,7 @@ func (c *client) hostBridgeUplinkFlows(localSubnet net.IPNet, category cookie.Ca MatchDstIPNet(localSubnet). Action().LoadRegMark(FromUplinkRegMark). Action().LoadRegMark(RewriteMACRegMark). - Action().GotoTable(serviceHairpinTable). + Action().GotoTable(snatConntrackTable). Cookie(c.cookieAllocator.Request(category).Raw()). Done()) } diff --git a/test/integration/agent/openflow_test.go b/test/integration/agent/openflow_test.go index 3e7be68e871..5bd58e5f5f0 100644 --- a/test/integration/agent/openflow_test.go +++ b/test/integration/agent/openflow_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/component-base/metrics/legacyregistry" config1 "antrea.io/antrea/pkg/agent/config" + "antrea.io/antrea/pkg/agent/interfacestore" "antrea.io/antrea/pkg/agent/metrics" ofClient "antrea.io/antrea/pkg/agent/openflow" "antrea.io/antrea/pkg/agent/openflow/cookie" @@ -113,7 +114,7 @@ func TestConnectivityFlows(t *testing.T) { antrearuntime.WindowsOS = runtime.GOOS } - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, true, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, true, false, true, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) defer func() { @@ -154,7 +155,7 @@ func TestConnectivityFlows(t *testing.T) { } func TestReplayFlowsConnectivityFlows(t *testing.T) { - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, false, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, true, false, false, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -190,7 +191,7 @@ func TestReplayFlowsConnectivityFlows(t *testing.T) { } func TestReplayFlowsNetworkPolicyFlows(t *testing.T) { - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, false, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, true, false, false, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -375,7 +376,7 @@ func TestNetworkPolicyFlows(t *testing.T) { // Initialize ovs metrics (Prometheus) to test them metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, false, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, true, false, false, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -485,7 +486,7 @@ func TestIPv6ConnectivityFlows(t *testing.T) { // Initialize ovs metrics (Prometheus) to test them metrics.InitializeOVSMetrics() - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, true, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, true, false, true, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge: %v", err)) @@ -527,7 +528,9 @@ type svcConfig struct { } func TestProxyServiceFlows(t *testing.T) { - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, true, false, 
false, false, false) + ifaceStore := interfacestore.NewInterfaceStore() + + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, ifaceStore, true, false, false, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br)) @@ -552,6 +555,22 @@ func TestProxyServiceFlows(t *testing.T) { }), } + var ifaceList []*interfacestore.InterfaceConfig + iface := &interfacestore.InterfaceConfig{ + Type: interfacestore.ContainerInterface, + IPs: []net.IP{net.ParseIP("192.168.1.2")}, + OVSPortConfig: &interfacestore.OVSPortConfig{ + OFPort: int32(3), + }, + ContainerInterfaceConfig: &interfacestore.ContainerInterfaceConfig{ + ContainerID: "fakeid", + PodName: "test", + PodNamespace: "test-ns", + }, + } + ifaceList = append(ifaceList, iface) + ifaceStore.Initialize(ifaceList) + stickyMaxAgeSeconds := uint16(30) tcs := []struct { @@ -594,7 +613,7 @@ func TestProxyServiceFlows(t *testing.T) { for _, tc := range tcs { groupID := ofconfig.GroupIDType(tc.gid) - expTableFlows, expGroupBuckets := expectedProxyServiceGroupAndFlows(tc.gid, tc.svc, tc.endpoints, tc.stickyAge) + expTableFlows, expGroupBuckets := expectedProxyServiceGroupAndFlows(tc.gid, tc.svc, tc.endpoints, tc.stickyAge, ifaceStore) installServiceFlows(t, tc.gid, tc.svc, tc.endpoints, tc.stickyAge) for _, tableFlow := range expTableFlows { ofTestUtils.CheckFlowExists(t, ovsCtlClient, tableFlow.tableID, true, tableFlow.flows) @@ -607,7 +626,6 @@ func TestProxyServiceFlows(t *testing.T) { } ofTestUtils.CheckGroupExists(t, ovsCtlClient, groupID, "select", expGroupBuckets, false) } - } func installServiceFlows(t *testing.T, gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint, stickyMaxAgeSeconds uint16) { @@ -632,7 +650,7 @@ func uninstallServiceFlowsFunc(t *testing.T, gid uint32, svc svcConfig, endpoint } } -func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint, stickyAge uint16) (tableFlows []expectTableFlows, groupBuckets []string) { +func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList []k8sproxy.Endpoint, stickyAge uint16, ifaceStore interfacestore.InterfaceStore) (tableFlows []expectTableFlows, groupBuckets []string) { nw_proto := 6 learnProtoField := "NXM_OF_TCP_DST[]" if svc.protocol == ofconfig.ProtocolUDP { @@ -659,7 +677,7 @@ func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList [ }, }} epDNATFlows := expectTableFlows{tableID: 42, flows: []*ofTestUtils.ExpectFlow{}} - hairpinFlows := expectTableFlows{tableID: 108, flows: []*ofTestUtils.ExpectFlow{}} + hairpinFlows := expectTableFlows{tableID: 81, flows: []*ofTestUtils.ExpectFlow{}} groupBuckets = make([]string, 0) for _, ep := range endpointList { epIP := ipToHexString(net.ParseIP(ep.IP())) @@ -672,11 +690,11 @@ func expectedProxyServiceGroupAndFlows(gid uint32, svc svcConfig, endpointList [ MatchStr: fmt.Sprintf("priority=200,%s,reg3=%s,reg4=0x%x/0x7ffff", string(svc.protocol), epIP, unionVal), ActStr: fmt.Sprintf("ct(commit,table=50,zone=65520,nat(dst=%s:%d),exec(load:0x21->NXM_NX_CT_MARK[])", ep.IP(), epPort), }) - if ep.GetIsLocal() { + ofPort, _ := ifaceStore.GetInterfaceByIP(ep.IP()) hairpinFlows.flows = append(hairpinFlows.flows, &ofTestUtils.ExpectFlow{ - MatchStr: fmt.Sprintf("priority=200,ip,nw_src=%s,nw_dst=%s", ep.IP(), ep.IP()), - ActStr: "set_field:169.254.169.252->ip_src,load:0x1->NXM_NX_REG0[18],goto_table:110", + MatchStr: fmt.Sprintf("priority=200,ip,reg1=0x%x,in_port=%d", ofPort.OFPort, 
ofPort.OFPort), + ActStr: "load:0x1->NXM_NX_REG0[18],goto_table:90", }) } } @@ -991,7 +1009,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", podMAC.String()), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:90", podOFPort), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:81", podOFPort), }, }, }, @@ -1014,7 +1032,7 @@ func preparePodFlows(podIPs []net.IP, podMAC net.HardwareAddr, podOFPort uint32, }, }, }) - nextTableForSpoofguard = 23 + nextTableForSpoofguard = 24 } else { ipProto = "ipv6" nwSrcField = "ipv6_src" @@ -1062,7 +1080,7 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,dl_dst=%s", gwMAC.String()), - ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:101", config1.HostGatewayOFPort), + ActStr: fmt.Sprintf("load:0x%x->NXM_NX_REG1[],load:0x1->NXM_NX_REG0[16],goto_table:81", config1.HostGatewayOFPort), }, }, }, @@ -1084,7 +1102,7 @@ func prepareGatewayFlows(gwIPs []net.IP, gwMAC net.HardwareAddr, vMAC net.Hardwa }, { MatchStr: fmt.Sprintf("priority=200,ip,in_port=%d", config1.HostGatewayOFPort), - ActStr: "goto_table:23", + ActStr: "goto_table:24", }, }, }) @@ -1134,7 +1152,7 @@ func prepareTunnelFlows(tunnelPort uint32, vMAC net.HardwareAddr) []expectTableF []*ofTestUtils.ExpectFlow{ { MatchStr: fmt.Sprintf("priority=200,in_port=%d", tunnelPort), - ActStr: "load:0->NXM_NX_REG0[0..3],load:0x1->NXM_NX_REG0[19],goto_table:30", + ActStr: "load:0->NXM_NX_REG0[0..3],load:0x1->NXM_NX_REG0[19],goto_table:24", }, }, }, @@ -1203,7 +1221,7 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows { } table105Flows := expectTableFlows{ tableID: 105, - flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:108"}}, + flows: []*ofTestUtils.ExpectFlow{{MatchStr: "priority=0", ActStr: "goto_table:110"}}, } table72Flows := expectTableFlows{ tableID: 72, @@ -1220,8 +1238,8 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows { &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ip", ActStr: "drop"}, ) table105Flows.flows = append(table105Flows.flows, - &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x1/0xf", ActStr: "ct(commit,table=108,zone=65520,exec(load:0x20->NXM_NX_CT_MARK[])"}, - &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ip", ActStr: "ct(commit,table=108,zone=65520)"}, + &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ip,reg0=0x1/0xf", ActStr: "ct(commit,table=110,zone=65520,exec(load:0x20->NXM_NX_CT_MARK[])"}, + &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ip", ActStr: "ct(commit,table=110,zone=65520)"}, ) table72Flows.flows = append(table72Flows.flows, &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ip,reg0=0x1/0xf", ActStr: "goto_table:80"}, @@ -1236,8 +1254,8 @@ func prepareDefaultFlows(config *testConfig) []expectTableFlows { &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+inv+trk,ipv6", ActStr: "drop"}, ) table105Flows.flows = append(table105Flows.flows, - &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ipv6,reg0=0x1/0xf", ActStr: "ct(commit,table=108,zone=65510,exec(load:0x20->NXM_NX_CT_MARK[])"}, - &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ipv6", ActStr: 
"ct(commit,table=108,zone=65510)"}, + &ofTestUtils.ExpectFlow{MatchStr: "priority=200,ct_state=+new+trk,ipv6,reg0=0x1/0xf", ActStr: "ct(commit,table=110,zone=65510,exec(load:0x20->NXM_NX_CT_MARK[])"}, + &ofTestUtils.ExpectFlow{MatchStr: "priority=190,ct_state=+new+trk,ipv6", ActStr: "ct(commit,table=110,zone=65510)"}, ) table72Flows.flows = append(table72Flows.flows, &ofTestUtils.ExpectFlow{MatchStr: "priority=210,ipv6,reg0=0x1/0xf", ActStr: "goto_table:80"}, @@ -1403,7 +1421,7 @@ func prepareSNATFlows(snatIP net.IP, mark, podOFPort, podOFPortRemote uint32, vM } func TestSNATFlows(t *testing.T) { - c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, false, false, true, false, false) + c = ofClient.NewClient(br, bridgeMgmtAddr, ovsconfig.OVSDatapathNetdev, nil, false, false, true, false, false) err := ofTestUtils.PrepareOVSBridge(br) require.Nil(t, err, fmt.Sprintf("Failed to prepare OVS bridge %s", br))