diff --git a/.github/workflows/advanced-lb-sanity.yml b/.github/workflows/advanced-lb-sanity.yml index 90bf3dbb7..a19f616c2 100644 --- a/.github/workflows/advanced-lb-sanity.yml +++ b/.github/workflows/advanced-lb-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/basic-sanity-ubuntu-22.yml b/.github/workflows/basic-sanity-ubuntu-22.yml index 9f4ad7c2c..943adb4ab 100644 --- a/.github/workflows/basic-sanity-ubuntu-22.yml +++ b/.github/workflows/basic-sanity-ubuntu-22.yml @@ -42,6 +42,7 @@ jobs: - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test + - run: for iface in $(ls /sys/class/net); do sudo tc filter delete dev $iface ingress >> /dev/null 2>&1 | true; sudo tc filter delete dev $iface egress >> /dev/null 2>&1 | true; done - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest-amd64 - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/basic-sanity.yml b/.github/workflows/basic-sanity.yml index 5a9df8200..1f1ddd80c 100644 --- a/.github/workflows/basic-sanity.yml +++ b/.github/workflows/basic-sanity.yml @@ -24,6 +24,7 @@ jobs: - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - run: sudo -E env "PATH=$PATH" make test + - run: for iface in $(ls /sys/class/net); do sudo tc filter delete dev $iface ingress >> /dev/null 2>&1 | true; sudo tc filter delete dev $iface egress >> /dev/null 2>&1 | true; done - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/cluster-sanity.yml b/.github/workflows/cluster-sanity.yml index a23ae569c..d15c4909e 100644 --- a/.github/workflows/cluster-sanity.yml +++ b/.github/workflows/cluster-sanity.yml @@ -26,7 +26,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/ipsec-sanity.yml b/.github/workflows/ipsec-sanity.yml index 3ac2517cf..66c0c5416 100644 --- a/.github/workflows/ipsec-sanity.yml +++ b/.github/workflows/ipsec-sanity.yml @@ -31,7 +31,6 @@ 
jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/liveness-sanity.yml b/.github/workflows/liveness-sanity.yml index f1ebcc78b..65377dc1e 100644 --- a/.github/workflows/liveness-sanity.yml +++ b/.github/workflows/liveness-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/nat66-sanity-ubuntu-22.yml b/.github/workflows/nat66-sanity-ubuntu-22.yml index 073a8b526..a926e36d2 100644 --- a/.github/workflows/nat66-sanity-ubuntu-22.yml +++ b/.github/workflows/nat66-sanity-ubuntu-22.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/nat66-sanity.yml b/.github/workflows/nat66-sanity.yml index 8a4351b25..7432d642b 100644 --- a/.github/workflows/nat66-sanity.yml +++ b/.github/workflows/nat66-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/scale-sanity-ubuntu-22.yml b/.github/workflows/scale-sanity-ubuntu-22.yml index 085e5a31b..c1e89016d 100644 --- a/.github/workflows/scale-sanity-ubuntu-22.yml +++ b/.github/workflows/scale-sanity-ubuntu-22.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git 
libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat lksctp-tools - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/scale-sanity.yml b/.github/workflows/scale-sanity.yml index 35fd189d8..56086d268 100644 --- a/.github/workflows/scale-sanity.yml +++ b/.github/workflows/scale-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/sctp-sanity-ubuntu-22.yml b/.github/workflows/sctp-sanity-ubuntu-22.yml index a5b753319..f23b73d71 100644 --- a/.github/workflows/sctp-sanity-ubuntu-22.yml +++ b/.github/workflows/sctp-sanity-ubuntu-22.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool lksctp-tools - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/sctp-sanity.yml b/.github/workflows/sctp-sanity.yml index 637b0feb9..0d19afe4e 100644 --- a/.github/workflows/sctp-sanity.yml +++ b/.github/workflows/sctp-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/sctpmh-sanity.yml b/.github/workflows/sctpmh-sanity.yml index facc8fbb9..5dbc9b6ca 100644 --- a/.github/workflows/sctpmh-sanity.yml +++ b/.github/workflows/sctpmh-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool curl lksctp-tools - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env 
"PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/tcp-sanity-ubuntu-22.yml b/.github/workflows/tcp-sanity-ubuntu-22.yml index ba91c8518..29720fd27 100644 --- a/.github/workflows/tcp-sanity-ubuntu-22.yml +++ b/.github/workflows/tcp-sanity-ubuntu-22.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latestu22 - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/tcp-sanity.yml b/.github/workflows/tcp-sanity.yml index 083cee68b..b55ef2e82 100644 --- a/.github/workflows/tcp-sanity.yml +++ b/.github/workflows/tcp-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/.github/workflows/test-scenario.yml b/.github/workflows/test-scenario.yml index 9781d2a54..4c2aa684e 100644 --- a/.github/workflows/test-scenario.yml +++ b/.github/workflows/test-scenario.yml @@ -31,6 +31,15 @@ jobs: go-version: '>=1.18.0' - run: sudo apt-get update - run: sudo apt-get -y install lksctp-tools linux-tools-$(uname -r) bridge-utils iperf iproute2 nodejs socat + - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev elfutils dwarves git libbsd-dev unzip build-essential bison flex ethtool + - run: loxilb-ebpf/utils/mkllb_bpffs.sh + - run: sudo -E env "PATH=$PATH" make + - run: docker pull ghcr.io/loxilb-io/loxilb:latest + - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest + - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp + - run: docker exec -dit loxilb mkllb_bpffs + - run: id=`docker ps -f name=loxilb | cut -d " " -f 1 | grep -iv "CONTAINER"` && docker commit $id ghcr.io/loxilb-io/loxilb:latest + - run: docker stop loxilb && docker rm loxilb - run: | cd cicd/${{ github.event.inputs.testName }}/ ./config.sh diff --git a/.github/workflows/udp-sanity-ubuntu-22.yml b/.github/workflows/udp-sanity-ubuntu-22.yml index 3ea2a3f8e..83608eaa3 100644 --- a/.github/workflows/udp-sanity-ubuntu-22.yml +++ b/.github/workflows/udp-sanity-ubuntu-22.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-13 llvm libelf-dev gcc-multilib libpcap-dev 
linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp-ebpf diff --git a/.github/workflows/udp-sanity.yml b/.github/workflows/udp-sanity.yml index d46d4ab29..825bc4782 100644 --- a/.github/workflows/udp-sanity.yml +++ b/.github/workflows/udp-sanity.yml @@ -31,7 +31,6 @@ jobs: - run: sudo apt-get -y install clang-10 llvm libelf-dev gcc-multilib libpcap-dev linux-tools-$(uname -r) elfutils dwarves git libbsd-dev bridge-utils unzip build-essential bison flex iperf iproute2 nodejs socat ethtool - run: loxilb-ebpf/utils/mkllb_bpffs.sh - run: sudo -E env "PATH=$PATH" make - - run: sudo -E env "PATH=$PATH" make test - run: docker pull ghcr.io/loxilb-io/loxilb:latest - run: docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --name loxilb ghcr.io/loxilb-io/loxilb:latest - run: pwd && ls && sudo -E env "PATH=$PATH" make docker-cp diff --git a/api/loxinlp/nlp.go b/api/loxinlp/nlp.go index 050acc8b0..0a41156e1 100644 --- a/api/loxinlp/nlp.go +++ b/api/loxinlp/nlp.go @@ -583,7 +583,8 @@ func AddVxLANBridgeNoHook(vxlanid int, epIntfName string) int { SrcAddr: LocalIPs[0].IP, VtepDevIndex: EndpointInterface.Attrs().Index, VxlanId: vxlanid, - Port: 4789, // VxLAN default port + Port: 8472, // VxLAN default port + Learning: true, } if err := nlp.LinkAdd(VxlanDev); err != nil { tk.LogIt(tk.LogWarning, "nlp: failed to create VxlanDev: [ %v ] with the error: %s\n", VxlanDev, err) @@ -1270,6 +1271,26 @@ func AddRouteNoHook(DestinationIPNet, gateway, proto string) int { return ret } +func GetRouteNoHook(destination string) ([]string, error) { + var gws []string + + dst := net.ParseIP(destination) + if dst == nil { + return []string{}, errors.New("invalid destination") + } + + rts, err := nlp.RouteGet(dst) + if err != nil { + return []string{}, errors.New("invalid rt destination") + } + + for _, rt := range rts { + gws = append(gws, rt.Gw.String()) + } + + return gws, nil +} + func DelRouteNoHook(DestinationIPNet string) int { var ret int var route nlp.Route diff --git a/api/models/firewall_option_entry.go b/api/models/firewall_option_entry.go index 0a26bd90e..8276d55e3 100644 --- a/api/models/firewall_option_entry.go +++ b/api/models/firewall_option_entry.go @@ -32,6 +32,9 @@ type FirewallOptionEntry struct { // Set a fwmark for any matching rule FwMark int64 `json:"fwMark,omitempty"` + // Trigger only on default cases + OnDefault bool `json:"onDefault,omitempty"` + // Record or dump for matching rule Record bool `json:"record,omitempty"` diff --git a/api/models/loadbalance_entry.go b/api/models/loadbalance_entry.go index a469c7738..f93e6b56f 100644 --- a/api/models/loadbalance_entry.go +++ b/api/models/loadbalance_entry.go @@ -409,6 +409,9 @@ type LoadbalanceEntryServiceArguments struct { // block-number if any of this LB entry Block uint32 `json:"block,omitempty"` + // flag to indicate an egress rule + Egress bool `json:"egress,omitempty"` + // IP address for external access ExternalIP string `json:"externalIP,omitempty"` diff --git a/api/restapi/embedded_spec.go b/api/restapi/embedded_spec.go 
index 94ff3998f..97ad8c5a7 100644 --- a/api/restapi/embedded_spec.go +++ b/api/restapi/embedded_spec.go @@ -4962,6 +4962,10 @@ func init() { "description": "Set a fwmark for any matching rule", "type": "integer" }, + "onDefault": { + "description": "Trigger only on default cases", + "type": "boolean" + }, "record": { "description": "Record or dump for matching rule", "type": "boolean" @@ -5131,6 +5135,10 @@ func init() { "type": "integer", "format": "uint32" }, + "egress": { + "description": "flag to indicate an egress rule", + "type": "boolean" + }, "externalIP": { "description": "IP address for external access", "type": "string" @@ -11261,6 +11269,10 @@ func init() { "description": "Set a fwmark for any matching rule", "type": "integer" }, + "onDefault": { + "description": "Trigger only on default cases", + "type": "boolean" + }, "record": { "description": "Record or dump for matching rule", "type": "boolean" @@ -11399,6 +11411,10 @@ func init() { "type": "integer", "format": "uint32" }, + "egress": { + "description": "flag to indicate an egress rule", + "type": "boolean" + }, "externalIP": { "description": "IP address for external access", "type": "string" @@ -11546,6 +11562,10 @@ func init() { "type": "integer", "format": "uint32" }, + "egress": { + "description": "flag to indicate an egress rule", + "type": "boolean" + }, "externalIP": { "description": "IP address for external access", "type": "string" diff --git a/api/restapi/handler/firewall.go b/api/restapi/handler/firewall.go index 009ab9f01..606108717 100644 --- a/api/restapi/handler/firewall.go +++ b/api/restapi/handler/firewall.go @@ -59,6 +59,7 @@ func ConfigPostFW(params operations.PostConfigFirewallParams) middleware.Respond Opts.DoSnat = params.Attr.Opts.DoSnat Opts.ToIP = params.Attr.Opts.ToIP Opts.ToPort = uint16(params.Attr.Opts.ToPort) + Opts.OnDefault = params.Attr.Opts.OnDefault FW.Rule = Rules FW.Opts = Opts @@ -179,6 +180,7 @@ func ConfigGetFW(params operations.GetConfigFirewallAllParams) middleware.Respon tmpOpts.DoSnat = FW.Opts.DoSnat tmpOpts.ToIP = FW.Opts.ToIP tmpOpts.ToPort = int64(FW.Opts.ToPort) + tmpOpts.OnDefault = FW.Opts.OnDefault tmpOpts.Counter = FW.Opts.Counter tmpResult.RuleArguments = &tmpRule tmpResult.Opts = &tmpOpts diff --git a/api/restapi/handler/loadbalancer.go b/api/restapi/handler/loadbalancer.go index b38a99d58..8e78e4531 100644 --- a/api/restapi/handler/loadbalancer.go +++ b/api/restapi/handler/loadbalancer.go @@ -50,6 +50,7 @@ func ConfigPostLoadbalancer(params operations.PostConfigLoadbalancerParams) midd lbRules.Serv.Oper = cmn.LBOp(params.Attr.ServiceArguments.Oper) lbRules.Serv.HostUrl = params.Attr.ServiceArguments.Host lbRules.Serv.ProxyProtocolV2 = params.Attr.ServiceArguments.Proxyprotocolv2 + lbRules.Serv.Egress = params.Attr.ServiceArguments.Egress if lbRules.Serv.Proto == "sctp" { for _, data := range params.Attr.SecondaryIPs { @@ -175,6 +176,7 @@ func ConfigGetLoadbalancer(params operations.GetConfigLoadbalancerAllParams) mid tmpSvc.Snat = lb.Serv.Snat tmpSvc.Host = lb.Serv.HostUrl tmpSvc.Proxyprotocolv2 = lb.Serv.ProxyProtocolV2 + tmpSvc.Egress = lb.Serv.Egress tmpLB.ServiceArguments = &tmpSvc diff --git a/api/swagger.yml b/api/swagger.yml index fcf7e9c82..a1144e51d 100644 --- a/api/swagger.yml +++ b/api/swagger.yml @@ -3041,6 +3041,9 @@ definitions: proxyprotocolv2: type: boolean description: flag to enable proxy protocol v2 + egress: + type: boolean + description: flag to indicate an egress rule endpoints: type: array @@ -3778,6 +3781,9 @@ definitions: toPort: type: integer 
description: Modify to given Port (Zero if port is not to be modified) + onDefault: + type: boolean + description: Trigger only on default cases counter: type: string description: traffic counters diff --git a/cicd/httpsep/config.sh b/cicd/httpsep/config.sh index 4e371603b..71b39f499 100755 --- a/cicd/httpsep/config.sh +++ b/cicd/httpsep/config.sh @@ -55,7 +55,7 @@ $dexec llb1 pkill loxilb $dexec llb1 ip link del llb0 docker exec -dt llb1 /root/loxilb-io/loxilb/loxilb -sleep 5 +sleep 25 $dexec llb1 loxicmd create endpoint 31.31.31.1 --probetype=https --probeport=8080 --probereq="health" --proberesp="OK" --period=60 --retries=2 $dexec llb1 loxicmd create endpoint 32.32.32.1 --probetype=https --probeport=8080 --probereq="health" --proberesp="OK" --period=60 --retries=2 diff --git a/cicd/sctptunlb/validation.sh b/cicd/sctptunlb/validation.sh index 06087f1a6..8d7b8b58d 100755 --- a/cicd/sctptunlb/validation.sh +++ b/cicd/sctptunlb/validation.sh @@ -9,7 +9,7 @@ $hexec l3e1 socat -v -T0.5 sctp-l:8080,reuseaddr,fork system:"echo 'server1'; ca $hexec l3e2 socat -v -T0.5 sctp-l:8080,reuseaddr,fork system:"echo 'server2'; cat" >/dev/null 2>&1 & $hexec l3e3 socat -v -T0.5 sctp-l:8080,reuseaddr,fork system:"echo 'server3'; cat" >/dev/null 2>&1 & -sleep 5 +sleep 20 code=0 j=0 waitCount=0 diff --git a/cicd/tcptunlb/validation.sh b/cicd/tcptunlb/validation.sh index 77c20da79..801002312 100755 --- a/cicd/tcptunlb/validation.sh +++ b/cicd/tcptunlb/validation.sh @@ -42,7 +42,7 @@ for j in {0..2} do #$hexec h$k ping ${ep[j]} -f -c 5 -W 1; res=`$hexec h$k curl --max-time 10 -s 88.88.88.88:2020` - #echo -e $res + echo -e $res if [[ $res != "${servArr[j]}" ]] then echo -e "Expected ${servArr[j]}, Received : $res" diff --git a/common/common.go b/common/common.go index 46e626a1d..e95794d3d 100644 --- a/common/common.go +++ b/common/common.go @@ -408,6 +408,8 @@ type FwOptArg struct { DoSnat bool `json:"doSnat"` ToIP string `json:"toIP"` ToPort uint16 `json:"toPort"` + // OnDefault - Trigger only on default cases + OnDefault bool `json:"onDefault"` // Counter - Traffic counter Counter string `json:"counter"` } @@ -583,6 +585,8 @@ type LbServiceArg struct { HostUrl string `json:"path"` // ProxyProtocolV2 - Enable proxy protocol v2 ProxyProtocolV2 bool `json:"proxyprotocolv2"` + // Egress - Egress Rule + Egress bool `json:"egress"` } // LbEndPointArg - Information related to load-balancer end-point @@ -891,7 +895,8 @@ type BFDMod struct { // ClusterNodeMod - information related to a cluster node instance type ClusterNodeMod struct { // Instance - Cluster Instance - Addr net.IP `json:"Addr"` + Addr net.IP `json:"Addr"` + Egress bool `json:"egress"` } const ( diff --git a/loxilb-ebpf b/loxilb-ebpf index ffa62f6a1..d11c87dc8 160000 --- a/loxilb-ebpf +++ b/loxilb-ebpf @@ -1 +1 @@ -Subproject commit ffa62f6a121e94ce809f5112931ca258f4800d43 +Subproject commit d11c87dc81fe20c18b1557a2cc1430edcd3fab7a diff --git a/options/options.go b/options/options.go index 5810c590e..fa71abc93 100644 --- a/options/options.go +++ b/options/options.go @@ -40,4 +40,5 @@ var Opts struct { ConfigPath string `long:"config-path" description:"Config file path" default:"/etc/loxilb/"` ProxyModeOnly bool `long:"proxyonlymode" description:"Run loxilb in proxy mode only, no Datapath"` WhiteList string `long:"whitelist" description:"Regex string of whitelisted interface(experimental)" default:"none"` + ClusterInterface string `long:"clusterinterface" description:"cluster interface for egress HA" default:""` } diff --git a/pkg/loxinet/cluster.go 
b/pkg/loxinet/cluster.go index c795e1559..40b72c7b6 100644 --- a/pkg/loxinet/cluster.go +++ b/pkg/loxinet/cluster.go @@ -18,13 +18,16 @@ package loxinet import ( "errors" + "fmt" + "net" + "os" + "time" + nlp "github.com/loxilb-io/loxilb/api/loxinlp" cmn "github.com/loxilb-io/loxilb/common" bfd "github.com/loxilb-io/loxilb/pkg/proto" + utils "github.com/loxilb-io/loxilb/pkg/utils" tk "github.com/loxilb-io/loxilib" - "net" - "os" - "time" ) // error codes for cluster module @@ -34,6 +37,12 @@ const ( CIStateErr ) +const ( + defaultClusterSubnet = "10.252.0.0/16" + defaultCluster6Subnet = "fd55:e81c:146f:66b5::/64" + ClusterNetID = 999 +) + // ClusterInstance - Struct for Cluster Instance information type ClusterInstance struct { State int @@ -44,6 +53,7 @@ type ClusterInstance struct { // ClusterNode - Struct for Cluster Node Information type ClusterNode struct { Addr net.IP + Egress bool Status DpStatusT } @@ -53,18 +63,28 @@ type CIKAArgs struct { RemoteIP net.IP SourceIP net.IP Interval int64 + CSubnet string + CSubnet6 string + CDev string } // CIStateH - Cluster context handler type CIStateH struct { - SpawnKa bool - RemoteIP net.IP - SourceIP net.IP - Interval int64 - ClusterMap map[string]*ClusterInstance - StateMap map[string]int - NodeMap map[string]*ClusterNode - Bs *bfd.Struct + SpawnKa bool + RemoteIP net.IP + SourceIP net.IP + Interval int64 + ClusterMap map[string]*ClusterInstance + StateMap map[string]int + NodeMap map[string]*ClusterNode + Bs *bfd.Struct + ClusterNet string + ClusterNet6 string + ClusterGw string + ClusterGw6 string + ClusterIf string + OGw []string + OGw6 []string } func (ci *CIStateH) BFDSessionNotify(instance string, remote string, ciState string) { @@ -104,7 +124,7 @@ func (ci *CIStateH) startBFDProto(bfdSessConfigArgs bfd.ConfigArgs) { } // CITicker - Periodic ticker for Cluster module -func (h *CIStateH) CITicker() { +func (ci *CIStateH) CITicker() { // Nothing to do currently } @@ -124,6 +144,16 @@ func (ci *CIStateH) CISpawn() { } } +// CIStateGetInst - routine to get HA state +func (h *CIStateH) CIStateGetInst(inst string) (string, error) { + + if ci, ok := h.ClusterMap[inst]; ok { + return ci.StateStr, nil + } + + return "NOT_DEFINED", errors.New("not found") +} + // CIInit - routine to initialize Cluster context func CIInit(args CIKAArgs) *CIStateH { var nCIh = new(CIStateH) @@ -151,17 +181,244 @@ func CIInit(args CIKAArgs) *CIStateH { } nCIh.NodeMap = make(map[string]*ClusterNode) + + if args.CDev != "" { + tk.LogIt(tk.LogInfo, "cluster-dev name %s\n", args.CDev) + _, err := net.InterfaceByName(args.CDev) + if err != nil { + tk.LogIt(tk.LogError, "cluster-dev name error\n") + os.Exit(1) + return nil + } + clusterCIDR := defaultClusterSubnet + if args.CSubnet != "" { + clusterCIDR = args.CSubnet + } + + clusterCIDR6 := defaultCluster6Subnet + if args.CSubnet6 != "" { + clusterCIDR6 = args.CSubnet6 + } + + ip, _, err := net.ParseCIDR(clusterCIDR) + if err != nil { + tk.LogIt(tk.LogError, "ClusterIP address invalid %s\n", clusterCIDR) + return nil + } + + ip6, _, err := net.ParseCIDR(clusterCIDR6) + if err != nil { + tk.LogIt(tk.LogError, "ClusterIP6 address invalid %s\n", clusterCIDR6) + return nil + } + + ifIP, err := utils.GetIfaceIPAddr(args.CDev) + if err != nil || ifIP == nil { + tk.LogIt(tk.LogError, "No IP address found in cluster-dev\n") + return nil + } + + ifIP6, _ := utils.GetIfaceIP6Addr(args.CDev) + if ifIP6 == nil { + tk.LogIt(tk.LogError, "No IP6 address found in cluster-dev\n") + ifIP6 = ip6 + ifIP6[len(ifIP6)-1]++ + } + + 
ip[len(ip)-2] = ifIP[len(ifIP)-2] + ip[len(ip)-1] = ifIP[len(ifIP)-1] + + ip6[len(ip)-2] = ifIP6[len(ifIP6)-2] + ip6[len(ip)-1] = ifIP6[len(ifIP6)-1] + + clusterIfName := fmt.Sprintf("vxlan%d", ClusterNetID) + + if nlp.AddVxLANBridgeNoHook(ClusterNetID, args.CDev) < 0 { + tk.LogIt(tk.LogError, "Failed to created Cluster Network\n") + return nil + } + + nlp.DelAddrNoHook(ip.String()+"/16", clusterIfName) + if nlp.AddAddrNoHook(ip.String()+"/16", clusterIfName) < 0 { + tk.LogIt(tk.LogError, "Failed to add Cluster Addr %s:%s\n", ip.String(), clusterIfName) + return nil + } + + nlp.DelAddrNoHook(ip6.String()+"/16", clusterIfName) + if nlp.AddAddrNoHook(ip6.String()+"/64", clusterIfName) < 0 { + tk.LogIt(tk.LogError, "Failed to add Cluster Addr %s:%s\n", ip6.String(), clusterIfName) + nCIh.ClusterNet6 = "" + } else { + nCIh.ClusterNet6 = clusterCIDR6 + } + + gw := make(net.IP, len(ip)) + copy(gw, ip) + gw[len(gw)-1] = 254 + + gw6 := make(net.IP, len(ip6)) + copy(gw6, ip6) + gw6[len(gw6)-1] = 254 + + nCIh.ClusterIf = args.CDev + nCIh.ClusterNet = clusterCIDR + nCIh.ClusterNet6 = clusterCIDR6 + nCIh.ClusterGw = gw.String() + nCIh.ClusterGw6 = gw6.String() + + nCIh.OGw, _ = nlp.GetRouteNoHook("8.8.8.8") + nCIh.OGw6, _ = nlp.GetRouteNoHook("2001:4860:4860::8888") + + tk.LogIt(tk.LogInfo, "Cluster IP address %s GW %s oGW %v\n", ip.String(), nCIh.ClusterGw, nCIh.OGw) + tk.LogIt(tk.LogInfo, "Cluster IP6 address %s GW6 %s oGw6 %v\n", ip6.String(), nCIh.ClusterGw, nCIh.OGw6) + + } + return nCIh } -// CIStateGetInst - routine to get HA state -func (h *CIStateH) CIStateGetInst(inst string) (string, error) { +// CIDestroy - routine to destroy Cluster context +func (ci *CIStateH) CIDestroy() { - if ci, ok := h.ClusterMap[inst]; ok { - return ci.StateStr, nil + if ci.ClusterIf != "" { + tk.LogIt(tk.LogError, "cluster-dev name\n") + _, err := net.InterfaceByName(ci.ClusterIf) + if err != nil { + tk.LogIt(tk.LogError, "cluster-dev name error\n") + return + } + + clusterCIDR := ci.ClusterNet + clusterCIDR6 := ci.ClusterNet6 + + ip, _, err := net.ParseCIDR(clusterCIDR) + if err != nil { + tk.LogIt(tk.LogError, "ClusterIP address invalid %s\n", clusterCIDR) + return + } + + ip6, _, err := net.ParseCIDR(clusterCIDR6) + if err != nil { + tk.LogIt(tk.LogError, "ClusterIP6 address invalid %s\n", clusterCIDR6) + return + } + + ifIP, err := utils.GetIfaceIPAddr(ci.ClusterIf) + if err != nil || ifIP == nil { + tk.LogIt(tk.LogError, "No IP address found in cluster-dev\n") + return + } + + ifIP6, _ := utils.GetIfaceIP6Addr(ci.ClusterIf) + if ifIP6 == nil { + tk.LogIt(tk.LogError, "No IP6 address found in cluster-dev\n") + ifIP6 = ip6 + ifIP6[len(ifIP6)-1]++ + } + + ip[len(ip)-2] = ifIP[len(ifIP)-2] + ip[len(ip)-1] = ifIP[len(ifIP)-1] + + ip6[len(ip)-2] = ifIP6[len(ifIP6)-2] + ip6[len(ip)-1] = ifIP6[len(ifIP6)-1] + + clusterIfName := fmt.Sprintf("vxlan%d", ClusterNetID) + + if nlp.DelAddrNoHook(ip.String()+"/16", clusterIfName) < 0 { + tk.LogIt(tk.LogError, "Failed to delete Cluster Addr %s:%s\n", ip.String(), clusterIfName) + } + + if nlp.DelAddrNoHook(ip6.String()+"/64", clusterIfName) < 0 { + tk.LogIt(tk.LogError, "Failed to delete Cluster Addr %s:%s\n", ip6.String(), clusterIfName) + } + + tk.LogIt(tk.LogInfo, "Cluster IP address %s deleted\n", ip.String()) + tk.LogIt(tk.LogInfo, "Cluster IP6 address %s deleted\n", ip6.String()) + + if nlp.DelVxLANNoHook(ClusterNetID) < 0 { + tk.LogIt(tk.LogError, "Failed to delete Cluster Network\n") + } + + if len(mh.has.OGw) > 0 { + nlp.DelRouteNoHook("0.0.0.0/0") + for _, gw := 
range mh.has.OGw { + nlp.AddRouteNoHook("0.0.0.0/0", gw, "static") + } + } + if len(mh.has.OGw6) > 0 { + nlp.DelRouteNoHook("::/0") + for _, gw := range mh.has.OGw6 { + nlp.AddRouteNoHook("::/0", gw, "static") + } + } } +} - return "NOT_DEFINED", errors.New("not found") +// CIAddClusterRoute - routine to add a cluster route +func (h *CIStateH) CIAddClusterRoute(dest string, add bool) { + + if add { + found := false + if tk.IsNetIPv4(dest) { + gws, _ := nlp.GetRouteNoHook("8.8.8.8") + for _, gw := range gws { + if gw == dest { + found = true + break + } + } + fmt.Printf("gws = %v: dest %s\n", gws, dest) + if !found { + nlp.DelRouteNoHook("0.0.0.0/0") + nlp.AddRouteNoHook("0.0.0.0/0", dest, "static") + } + } else { + found = false + gws, _ := nlp.GetRouteNoHook("2001:4860:4860::8888") + for _, gw := range gws { + if gw == dest { + found = true + break + } + } + if !found { + nlp.DelRouteNoHook("::/0") + nlp.AddRouteNoHook("::/0", dest, "static") + } + } + } else { + found := false + if tk.IsNetIPv4(dest) { + gws, _ := nlp.GetRouteNoHook("8.8.8.8") + for _, gw := range gws { + if gw == dest { + found = true + break + } + } + if found { + nlp.DelRouteNoHook("0.0.0.0/0") + for _, gw := range mh.has.OGw { + nlp.AddRouteNoHook("0.0.0.0/0", gw, "static") + } + } + } else { + found = false + gws, _ := nlp.GetRouteNoHook("2001:4860:4860::8888") + for _, gw := range gws { + if gw == dest { + found = true + break + } + } + if found { + nlp.DelRouteNoHook("::/0") + for _, gw := range mh.has.OGw { + nlp.AddRouteNoHook("::", gw, "static") + } + } + } + } } // CIStateGet - routine to get HA state @@ -223,7 +480,7 @@ func (h *CIStateH) CIStateUpdate(cm cmn.HASMod) (int, error) { if mh.bgp != nil { mh.bgp.UpdateCIState(cm.Instance, ci.State, ci.Vip) } - go mh.zr.Rules.RuleVIPSyncToClusterState() + go mh.zr.Rules.RulesSyncToClusterState() return ci.State, nil } @@ -243,6 +500,7 @@ func (h *CIStateH) ClusterNodeAdd(node cmn.ClusterNodeMod) (int, error) { cNode = new(ClusterNode) cNode.Addr = node.Addr + cNode.Egress = node.Egress h.NodeMap[node.Addr.String()] = cNode cNode.DP(DpCreate) @@ -356,6 +614,23 @@ func (h *CIStateH) CIBFDSessionGet() ([]cmn.BFDMod, error) { // DP - sync state of cluster-node entity to data-path func (cn *ClusterNode) DP(work DpWorkT) int { + if cn.Egress { + if work == DpCreate { + if !utils.IsIPHostAddr(cn.Addr.String()) { + ret := nlp.AddVxLANPeerNoHook(ClusterNetID, cn.Addr.String()) + if ret != 0 { + cn.Status = DpCreateErr + } + } + return 0 + } else { + if !utils.IsIPHostAddr(cn.Addr.String()) { + nlp.DelVxLANPeerNoHook(ClusterNetID, cn.Addr.String()) + return 0 + } + } + } + pwq := new(PeerDpWorkQ) pwq.Work = work pwq.PeerIP = cn.Addr diff --git a/pkg/loxinet/dpbroker.go b/pkg/loxinet/dpbroker.go index e2b54f131..936289f57 100644 --- a/pkg/loxinet/dpbroker.go +++ b/pkg/loxinet/dpbroker.go @@ -239,6 +239,7 @@ type FwDpWorkQ struct { FwVal1 uint16 FwVal2 uint32 FwRecord bool + OnDflt bool } // NatT - type of NAT diff --git a/pkg/loxinet/dpebpf_linux.go b/pkg/loxinet/dpebpf_linux.go index ad064b861..40e82308a 100644 --- a/pkg/loxinet/dpebpf_linux.go +++ b/pkg/loxinet/dpebpf_linux.go @@ -896,7 +896,11 @@ func DpRouteMod(w *RouteDpWorkQ) int { C.memset(unsafe.Pointer(dat), 0, C.sizeof_struct_dp_rt_tact) if w.NMax > 0 { - dat.ca.act_type = C.DP_SET_RT_NHNUM + if w.Dst.IP.IsUnspecified() { + dat.ca.act_type = C.DP_SET_RT_NHNUM_DFLT + } else { + dat.ca.act_type = C.DP_SET_RT_NHNUM + } act = (*rtL3NhAct)(getPtrOffset(unsafe.Pointer(dat), C.sizeof_struct_dp_cmn_act)) act.naps = 
C.ushort(w.NMax) diff --git a/pkg/loxinet/layer3.go b/pkg/loxinet/layer3.go index 86d52caee..207e56bcf 100644 --- a/pkg/loxinet/layer3.go +++ b/pkg/loxinet/layer3.go @@ -557,6 +557,14 @@ func (ifa *Ifa) DP(work DpWorkT) int { rmWq.PortNum = port.PortNo + if port.SInfo.PortType&cmn.PortVxlanBr == cmn.PortVxlanBr { + if port.SInfo.PortReal == nil { + tk.LogIt(tk.LogError, "No real port : %s\n", port.Name) + ifa.Sync = DpCreateErr + return -1 + } + } + mh.dp.ToDpCh <- rmWq if port.SInfo.PortType&cmn.PortVxlanBr == cmn.PortVxlanBr { @@ -565,7 +573,9 @@ func (ifa *Ifa) DP(work DpWorkT) int { rmWq.Status = &ifa.Sync if port.SInfo.PortReal == nil { - return 0 + tk.LogIt(tk.LogError, "No real port : %s(error)\n", port.Name) + ifa.Sync = DpCreateErr + return -1 } up := port.SInfo.PortReal diff --git a/pkg/loxinet/loxinet.go b/pkg/loxinet/loxinet.go index 779e2a536..5421c30ce 100644 --- a/pkg/loxinet/loxinet.go +++ b/pkg/loxinet/loxinet.go @@ -191,6 +191,7 @@ func loxiNetTicker(bgpPeerMode bool) { if !bgpPeerMode { mh.dpEbpf.DpEbpfUnInit() } + mh.has.CIDestroy() apiserver.ApiServerShutOk() } case t := <-mh.ticker.C: @@ -215,16 +216,22 @@ func sysctlInit() { func loxiNetInit() { var rpcMode int - kaArgs := KAString2Mode(opts.Opts.Ka) + // Initialize logger and specify the log file + logfile := fmt.Sprintf("%s%s.log", "/var/log/loxilb", os.Getenv("HOSTNAME")) + logLevel := LogString2Level(opts.Opts.LogLevel) + mh.logger = tk.LogItInit(logfile, logLevel, true) + + kaArgs := KAString2Mode(opts.Opts.Ka, opts.Opts.ClusterInterface) clusterMode := false if opts.Opts.ClusterNodes != "none" { clusterMode = true } - // Initialize logger and specify the log file - logfile := fmt.Sprintf("%s%s.log", "/var/log/loxilb", os.Getenv("HOSTNAME")) - logLevel := LogString2Level(opts.Opts.LogLevel) - mh.logger = tk.LogItInit(logfile, logLevel, true) + // Initialize the clustering subsystem + if mh.has = CIInit(kaArgs); mh.has == nil { + tk.LogIt(tk.LogError, "cluster init failed\n") + os.Exit(1) + } // It is important to make sure loxilb's eBPF filesystem // is in place and mounted to make sure maps are pinned properly @@ -299,8 +306,6 @@ func loxiNetInit() { return } - // Initialize the clustering subsystem - mh.has = CIInit(kaArgs) if clusterMode { if opts.Opts.Bgp { tk.LogIt(tk.LogInfo, "init-wait cluster mode\n") diff --git a/pkg/loxinet/neighbor.go b/pkg/loxinet/neighbor.go index 94bf24eb7..e7f59957d 100644 --- a/pkg/loxinet/neighbor.go +++ b/pkg/loxinet/neighbor.go @@ -170,6 +170,7 @@ func (n *NeighH) NeighAddTunEP(ne *Neigh, rIP net.IP, sIP net.IP, tunID uint32, // FIXME - Need to be able to support multiple overlays with same entry port := ne.OifPort if port == nil || (port.SInfo.PortOvl == nil && tunType != DpTunIPIP) { + tk.LogIt(tk.LogError, "neigh-add-tunep failed %v\n", port) return -1, nil } diff --git a/pkg/loxinet/port.go b/pkg/loxinet/port.go index eebdbb27d..934874c29 100644 --- a/pkg/loxinet/port.go +++ b/pkg/loxinet/port.go @@ -317,12 +317,12 @@ func (P *PortsH) PortAdd(name string, osid int, ptype int, zone string, return PortCounterErr, err } - var rp *Port = nil + var rp *Port if hwi.Real != "" { rp = P.portSmap[hwi.Real] if rp == nil { tk.LogIt(tk.LogError, "port add - %s no real-port(%s)\n", name, hwi.Real) - return PortNoRealDevErr, errors.New("no-realport error") + //return PortNoRealDevErr, errors.New("no-realport error") } } else if ptype == cmn.PortVxlanBr { tk.LogIt(tk.LogError, "port add - %s real-port needed\n", name) @@ -793,11 +793,26 @@ func (P *PortsH) PortNotifierRegister(notifier 
PortEventIntf) { // PortTicker - a ticker routine for ports func (P *PortsH) PortTicker() { var ev PortEvent - var portMod = false + portMod := false for _, port := range P.portSmap { portMod = false + if port.HInfo.Real != "" { + rp := P.portSmap[port.HInfo.Real] + if rp == nil { + tk.LogIt(tk.LogError, "port - %s no real-port(%s)\n", port.Name, port.HInfo.Real) + } else if rp.SInfo.PortOvl == nil { + tk.LogIt(tk.LogError, "port - %s set ovl-port(%s)\n", rp.Name, port.Name) + rp.SInfo.PortOvl = port + } + if port.SInfo.PortReal != rp { + port.SInfo.PortReal = rp + } + } + + continue + // TODO - This is not very efficient since internally // it will get all OS interfaces each time osIntf, err := net.InterfaceByName(port.Name) diff --git a/pkg/loxinet/rules.go b/pkg/loxinet/rules.go index aa08c735a..eb97e0623 100644 --- a/pkg/loxinet/rules.go +++ b/pkg/loxinet/rules.go @@ -95,6 +95,7 @@ const ( DefaultPersistTimeOut = 10800 // Default persistent LB session timeout SnatFwMark = 0x80000000 // Snat Marker SrcChkFwMark = 0x40000000 // Src check Marker + OnDfltSnatFwMark = 0x20000000 // Ondefault Snat Marker ) type ruleTType uint @@ -194,6 +195,7 @@ type epHostOpts struct { currProbeDuration uint32 probePort uint16 probeActivated bool + egress bool } type epHost struct { @@ -243,6 +245,7 @@ type ruleFwOpt struct { record bool snatIP string snatPort uint16 + onDflt bool } type ruleFwOpts struct { @@ -293,6 +296,7 @@ type ruleEnt struct { inst string secMode cmn.LBSec ppv2En bool + egress bool srcList []*allowedSrcElem locIPs map[string]struct{} } @@ -336,6 +340,7 @@ type vipElem struct { ref int pVIP net.IP inst string + egr bool } type allowedSrcElem struct { @@ -821,6 +826,7 @@ func (R *RuleH) GetLBRule() ([]cmn.LbRuleMod, error) { ret.Serv.Name = data.name ret.Serv.HostUrl = data.tuples.path ret.Serv.ProxyProtocolV2 = data.ppv2En + ret.Serv.Egress = data.egress if data.act.actType == RtActSnat { ret.Serv.Snat = true } @@ -884,7 +890,7 @@ func validateXlateEPWeights(servEndPoints []cmn.LbEndPointArg) (int, error) { return 0, nil } -func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liveCheckEn bool) { +func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liveCheckEn bool, egressEps bool) { var hopts epHostOpts pType := "" pPort := uint16(0) @@ -931,6 +937,10 @@ func (R *RuleH) modNatEpHost(r *ruleEnt, endpoints []ruleLBEp, doAddOp bool, liv hopts.probeActivated = true } + if egressEps { + hopts.egress = true + } + epKey := makeEPKey(nep.xIP.String(), pType, pPort) if doAddOp { @@ -1395,11 +1405,11 @@ func (R *RuleH) unFoldRecursiveEPs(r *ruleEnt) { // addVIPSys - system specific operations for VIPs of a LB rule func (R *RuleH) addVIPSys(r *ruleEnt) { if r.act.actType != RtActSnat && !strings.Contains(r.name, "ipvs") && !strings.Contains(r.name, "static") { - R.AddRuleVIP(r.tuples.l3Dst.addr.IP, r.RuleVIP2PrivIP(), r.inst) + R.AddRuleVIP(r.tuples.l3Dst.addr.IP, r.RuleVIP2PrivIP(), r.inst, r.egress) // Take care of any secondary VIPs for _, sVIP := range r.secIP { - R.AddRuleVIP(sVIP.sIP, sVIP.sIP, r.inst) + R.AddRuleVIP(sVIP.sIP, sVIP.sIP, r.inst, r.egress) } } } @@ -1493,9 +1503,16 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, al service := "" if tk.IsNetIPv4(serv.ServIP) { service = serv.ServIP + "/32" + if service == "0.0.0.0/32" && serv.Egress && mh.has.ClusterGw != "" { + service = mh.has.ClusterGw + "/32" + } } else { service = serv.ServIP + "/128" + if service == "::/128" && serv.Egress && 
mh.has.ClusterGw != "" { + service = mh.has.ClusterGw + "/128" + } } + _, sNetAddr, err := net.ParseCIDR(service) if err != nil { return RuleUnknownServiceErr, errors.New("malformed-service error") @@ -1696,6 +1713,10 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, al return RuleExistsErr, errors.New("lbrule-exist error: cant modify rule security mode") } + if eRule.egress != serv.Egress { + return RuleExistsErr, errors.New("lbrule-exist error: cant modify rule egress mode") + } + if len(retEps) == 0 { tk.LogIt(tk.LogDebug, "lb-rule %s has no-endpoints: to be deleted\n", eRule.tuples.String()) return R.DeleteLbRule(serv) @@ -1751,8 +1772,8 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, al // eRule.managed = serv.Managed if !serv.Snat { - R.modNatEpHost(eRule, delEps, false, activateProbe) - R.modNatEpHost(eRule, retEps, true, activateProbe) + R.modNatEpHost(eRule, delEps, false, activateProbe, eRule.egress) + R.modNatEpHost(eRule, retEps, true, activateProbe, eRule.egress) R.electEPSrc(eRule) } @@ -1790,6 +1811,7 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, al r.secIP = nSecIP r.secMode = serv.Security r.ppv2En = serv.ProxyProtocolV2 + r.egress = serv.Egress // Per LB end-point health-check is supposed to be handled at kube-loxilb/CCM, // but it certain cases like stand-alone mode, loxilb can do its own @@ -1837,7 +1859,7 @@ func (R *RuleH) AddLbRule(serv cmn.LbServiceArg, servSecIPs []cmn.LbSecIPArg, al if !serv.Snat { R.foldRecursiveEPs(r) - R.modNatEpHost(r, lBActs.endPoints, true, activateProbe) + R.modNatEpHost(r, lBActs.endPoints, true, activateProbe, r.egress) R.electEPSrc(r) if serv.Mode == cmn.LBModeHostOneArm { R.mkHostAssocs(r) @@ -1876,8 +1898,14 @@ func (R *RuleH) DeleteLbRule(serv cmn.LbServiceArg) (int, error) { service := "" if tk.IsNetIPv4(serv.ServIP) { service = serv.ServIP + "/32" + if service == "0.0.0.0/32" && serv.Egress && mh.has.ClusterGw != "" { + service = mh.has.ClusterGw + "/32" + } } else { service = serv.ServIP + "/128" + if service == "::/128" && serv.Egress && mh.has.ClusterGw != "" { + service = mh.has.ClusterGw + "/128" + } } _, sNetAddr, err := net.ParseCIDR(service) if err != nil { @@ -1916,7 +1944,7 @@ func (R *RuleH) DeleteLbRule(serv cmn.LbServiceArg) (int, error) { activatedProbe = true } if rule.act.actType != RtActSnat { - R.modNatEpHost(rule, eEps, false, activatedProbe) + R.modNatEpHost(rule, eEps, false, activatedProbe, rule.egress) R.unFoldRecursiveEPs(rule) } @@ -1985,6 +2013,7 @@ func (R *RuleH) GetFwRule() ([]cmn.FwRuleMod, error) { ret.Opts.Mark = fwOpts.opt.fwMark } ret.Opts.Record = fwOpts.opt.record + ret.Opts.OnDefault = fwOpts.opt.onDflt data.Fw2DP(DpStatsGetImm) ret.Opts.Counter = fmt.Sprintf("%v:%v", data.stat.packets, data.stat.bytes) @@ -2067,6 +2096,7 @@ func (R *RuleH) AddFwRule(fwRule cmn.FwRuleArg, fwOptArgs cmn.FwOptArg) (int, er fwOpts.op = RtActDrop fwOpts.opt.fwMark = fwOptArgs.Mark fwOpts.opt.record = fwOptArgs.Record + fwOpts.opt.onDflt = fwOptArgs.OnDefault if fwOptArgs.Allow { r.act.actType = RtActFwd @@ -2094,6 +2124,10 @@ func (R *RuleH) AddFwRule(fwRule cmn.FwRuleArg, fwOptArgs cmn.FwOptArg) (int, er if fwOpts.opt.fwMark != 0 { return RuleArgsErr, errors.New("malformed-args fwmark !=0 for snat-error") } + + if fwOpts.opt.onDflt { + R.AddRuleVIP(net.ParseIP(fwOptArgs.ToIP), nil, cmn.CIDefault, true) + } } r.act.action = &fwOpts @@ -2125,13 +2159,26 @@ func (R *RuleH) AddFwRule(fwRule cmn.FwRuleArg, fwOptArgs 
cmn.FwOptArg) (int, er return RuleArgsErr, errors.New("rule-snat error") } - fwOpts.opt.fwMark = uint32(r.ruleNum) | SnatFwMark + if !fwOptArgs.OnDefault { + fwOpts.opt.fwMark = uint32(r.ruleNum) | SnatFwMark + } else { + fwOpts.opt.fwMark = uint32(r.ruleNum) | OnDfltSnatFwMark + } } tk.LogIt(tk.LogDebug, "fw-rule added - %d:%s-%s\n", r.ruleNum, r.tuples.String(), r.act.String()) R.tables[RtFw].eMap[rt.ruleKey()] = r + if fwOptArgs.OnDefault { + state, err := mh.has.CIStateGetInst(cmn.CIDefault) + if err == nil { + if state == "BACKUP" { + return 0, nil + } + } + } + r.Fw2DP(DpCreate) return 0, nil @@ -2193,7 +2240,6 @@ func (R *RuleH) DeleteFwRule(fwRule cmn.FwRuleArg) (int, error) { if rule.act.actType == RtActSnat { // Delete implicit SNAT Rule - var servArg cmn.LbServiceArg servArg.ServIP = "0.0.0.0" servArg.ServPort = 0 @@ -2206,6 +2252,9 @@ func (R *RuleH) DeleteFwRule(fwRule cmn.FwRuleArg) (int, error) { switch fwOpts := rule.act.action.(type) { case *ruleFwOpts: servArg.Name = fmt.Sprintf("%s:%s:%d", "Masq", fwOpts.opt.snatIP, fwOpts.opt.snatPort) + if fwOpts.opt.onDflt { + R.DeleteRuleVIP(net.ParseIP(fwOpts.opt.snatIP)) + } } _, err := R.DeleteLbRule(servArg) @@ -2349,7 +2398,11 @@ func (R *RuleH) AddEPHost(apiCall bool, hostName string, name string, args epHos ep := R.epMap[epKey] if ep != nil { if apiCall { + egress := ep.opts.egress ep.opts = args + if egress { + ep.opts.egress = egress + } ep.opts.currProbeDuration = ep.opts.probeDuration ep.initProberOn = true return 0, nil @@ -2374,6 +2427,15 @@ func (R *RuleH) AddEPHost(apiCall bool, hostName string, name string, args epHos //ep.sT = time.Now() R.lepHID++ + if args.egress { + epNode := cmn.ClusterNodeMod{Addr: net.ParseIP(hostName), + Egress: true} + _, err := mh.has.ClusterNodeAdd(epNode) + if err != nil { + return -1, errors.New("ep-host add failed as cluster node") + } + } + R.epMap[epKey] = ep tk.LogIt(tk.LogDebug, "ep-host added %v:%d\n", epKey, ep.hID) @@ -2674,7 +2736,7 @@ func (R *RuleH) RulesSync() { ip = net.ParseIP(vip) } if ip != nil { - R.AdvRuleVIPIfL2(ip, net.ParseIP(vip), vipElem.inst) + R.AdvRuleVIP(ip, net.ParseIP(vip), vipElem.inst, vipElem.egr) } } R.vipST = time.Now() @@ -2784,6 +2846,10 @@ func (r *ruleEnt) LB2DP(work DpWorkT) int { return -1 } + if r.egress { + return 0 + } + nWork := new(LBDpWorkQ) nWork.Work = work @@ -3036,6 +3102,7 @@ func (r *ruleEnt) Fw2DP(work DpWorkT) int { } nWork.FwVal2 = at.opt.fwMark nWork.FwRecord = at.opt.record + nWork.OnDflt = at.opt.onDflt default: return -1 } @@ -3127,7 +3194,7 @@ func (r *ruleEnt) DP(work DpWorkT) int { } -func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP, inst string) error { +func (R *RuleH) AdvRuleVIP(IP net.IP, eIP net.IP, inst string, egress bool) error { if inst == "" { inst = cmn.CIDefault } @@ -3177,6 +3244,10 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP, inst string) error { } } + if egress { + mh.has.CIAddClusterRoute(IP.String(), false) + } + } else if ciState != "NOT_DEFINED" { if utils.IsIPHostAddr(IP.String()) { ifname := "lo" @@ -3192,6 +3263,11 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP, inst string) error { tk.LogIt(tk.LogInfo, "lb-rule vip %s:%s deleted\n", IP.String(), ifname) } } + + if egress { + mh.has.CIAddClusterRoute(IP.String(), true) + } + } else { if _, foundIP := R.zone.L3.IfaAddrLocal(IP); foundIP == nil { dev := fmt.Sprintf("llb-rule-%s", IP.String()) @@ -3203,12 +3279,16 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP, eIP net.IP, inst string) error { } } } + + if egress { + 
mh.has.CIAddClusterRoute(IP.String(), false) + } } return nil } -func (R *RuleH) RuleVIPSyncToClusterState() { +func (R *RuleH) RulesSyncToClusterState() { // For Cloud integrations, there is only default instance ciState, _ := mh.has.CIStateGetInst(cmn.CIDefault) @@ -3220,13 +3300,23 @@ func (R *RuleH) RuleVIPSyncToClusterState() { } } + for _, eFw := range R.tables[RtFw].eMap { + if eFw.act.action.(*ruleFwOpts).opt.onDflt { + if ciState == "MASTER" || ciState != "BACKUP" { + eFw.Fw2DP(DpCreate) + } else if ciState == "BACKUP" { + eFw.Fw2DP(DpRemove) + } + } + } + for vip, vipElem := range R.vipMap { ip := vipElem.pVIP if ip == nil { ip = net.ParseIP(vip) } if ip != nil { - R.AdvRuleVIPIfL2(ip, net.ParseIP(vip), vipElem.inst) + R.AdvRuleVIP(ip, net.ParseIP(vip), vipElem.inst, vipElem.egr) } } } @@ -3239,13 +3329,14 @@ func (r *ruleEnt) RuleVIP2PrivIP() net.IP { } } -func (R *RuleH) AddRuleVIP(VIP net.IP, pVIP net.IP, inst string) { +func (R *RuleH) AddRuleVIP(VIP net.IP, pVIP net.IP, inst string, egress bool) { vipEnt := R.vipMap[VIP.String()] if vipEnt == nil { vipEnt = new(vipElem) vipEnt.ref = 1 vipEnt.pVIP = pVIP vipEnt.inst = inst + vipEnt.egr = egress R.vipMap[VIP.String()] = vipEnt } else { vipEnt.ref++ @@ -3253,9 +3344,9 @@ func (R *RuleH) AddRuleVIP(VIP net.IP, pVIP net.IP, inst string) { if vipEnt.ref == 1 { if pVIP == nil { - R.AdvRuleVIPIfL2(VIP, VIP, inst) + R.AdvRuleVIP(VIP, VIP, inst, vipEnt.egr) } else { - R.AdvRuleVIPIfL2(pVIP, VIP, inst) + R.AdvRuleVIP(pVIP, VIP, inst, vipEnt.egr) } } } diff --git a/pkg/loxinet/utils.go b/pkg/loxinet/utils.go index 30465082c..582087099 100644 --- a/pkg/loxinet/utils.go +++ b/pkg/loxinet/utils.go @@ -87,20 +87,20 @@ func LogString2Level(logStr string) tk.LogLevelT { } // KAString2Mode - Convert ka mode in string opts to spawn/KAMode -func KAString2Mode(kaStr string) CIKAArgs { +func KAString2Mode(kaStr, dev string) CIKAArgs { spawnKa := false interval := int64(0) sourceIP := net.ParseIP("0.0.0.0") if kaStr == "none" { - return CIKAArgs{SpawnKa: spawnKa, RemoteIP: nil, Interval: interval} + return CIKAArgs{SpawnKa: spawnKa, RemoteIP: nil, Interval: interval, CDev: dev} } kaArgs := strings.Split(kaStr, ":") remote := net.ParseIP(kaArgs[0]) if remote == nil { - return CIKAArgs{SpawnKa: spawnKa, RemoteIP: nil, SourceIP: nil, Interval: interval} + return CIKAArgs{SpawnKa: spawnKa, RemoteIP: nil, SourceIP: nil, Interval: interval, CDev: dev} } if len(kaArgs) > 1 { @@ -111,7 +111,7 @@ func KAString2Mode(kaStr string) CIKAArgs { interval, _ = strconv.ParseInt(kaArgs[2], 10, 32) } spawnKa = true - return CIKAArgs{SpawnKa: spawnKa, RemoteIP: remote, SourceIP: sourceIP, Interval: interval} + return CIKAArgs{SpawnKa: spawnKa, RemoteIP: remote, SourceIP: sourceIP, Interval: interval, CDev: dev} } diff --git a/pkg/loxinet/zones.go b/pkg/loxinet/zones.go index 91c17ce27..fd3eb5c0d 100644 --- a/pkg/loxinet/zones.go +++ b/pkg/loxinet/zones.go @@ -239,5 +239,6 @@ func (z *ZoneH) ZoneTicker() { zone.Pols.PolTicker() zone.Mirrs.MirrTicker() zone.L3.IfasTicker(false) + zone.Ports.PortTicker() } } diff --git a/pkg/utils/net.go b/pkg/utils/net.go index 447bad96b..7de69a8bc 100644 --- a/pkg/utils/net.go +++ b/pkg/utils/net.go @@ -346,8 +346,32 @@ func Ntohll(i uint64) uint64 { return binary.BigEndian.Uint64((*(*[8]byte)(unsafe.Pointer(&i)))[:]) } -// GetIfaceIpAddr - Get interface IP address -func GetIfaceIpAddr(ifName string) (addr net.IP, err error) { +// GetIfaceIPAddr - Get interface IP address +func GetIfaceIPAddr(ifName string) (addr net.IP, err error) { + 
var ( + ief *net.Interface + addrs []net.Addr + ipAddr net.IP + ) + if ief, err = net.InterfaceByName(ifName); err != nil { + return nil, errors.New("not such ifname") + } + if addrs, err = ief.Addrs(); err != nil { + return nil, errors.New("not such addrs") + } + for _, addr := range addrs { + if ipAddr = addr.(*net.IPNet).IP.To4(); ipAddr != nil { + break + } + } + if ipAddr == nil { + return nil, errors.New("not ipv4 address") + } + return ipAddr, nil +} + +// GetIfaceIP6Addr - Get interface IP address +func GetIfaceIP6Addr(ifName string) (addr net.IP, err error) { var ( ief *net.Interface addrs []net.Addr @@ -360,7 +384,8 @@ func GetIfaceIpAddr(ifName string) (addr net.IP, err error) { return } for _, addr := range addrs { - if ipAddr = addr.(*net.IPNet).IP.To4(); ipAddr != nil { + if tk.IsNetIPv6(addr.(*net.IPNet).IP.String()) { + ipAddr = addr.(*net.IPNet).IP break } } @@ -374,7 +399,7 @@ func GetIfaceIpAddr(ifName string) (addr net.IP, err error) { func SendArpReq(AdvIP net.IP, ifName string) (int, error) { zeroAddr := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - srcIP, err := GetIfaceIpAddr(ifName) + srcIP, err := GetIfaceIPAddr(ifName) if err != nil { return -1, err }
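
The notes below sketch the main pieces of this patch in Go; any helper names, addresses, or values that do not appear in the diff itself are illustrative assumptions, not loxilb APIs.

CI workflows: `make test` moves out of most sanity workflows (it stays in the two basic-sanity jobs, which now flush any leftover tc filters afterwards), and test-scenario now builds a fresh loxilb image from the checked-out tree and commits it before running the selected scenario. A rough Go equivalent of the new tc cleanup step, assuming tc from iproute2 is on PATH:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// Rough equivalent of the new workflow step: after `make test`, tc filters
// left attached by the eBPF unit tests are flushed from every interface so
// the docker-based CICD runs start from a clean datapath.
func main() {
	entries, err := os.ReadDir("/sys/class/net")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, e := range entries {
		for _, dir := range []string{"ingress", "egress"} {
			// Mirrors: sudo tc filter delete dev <iface> <dir> || true
			cmd := exec.Command("tc", "filter", "delete", "dev", e.Name(), dir)
			_ = cmd.Run() // errors are ignored, as in the workflow
		}
	}
}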
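
api/loxinlp/nlp.go: AddVxLANBridgeNoHook now creates the vxlan device with learning enabled and UDP port 8472 (the Linux kernel's historical default vxlan port, as opposed to the IANA-assigned 4789), and a new GetRouteNoHook helper asks the kernel which gateway would be used to reach a destination; the cluster code later uses it with 8.8.8.8 / 2001:4860:4860::8888 to remember the original default gateways. A minimal sketch of that lookup against the vishvananda/netlink package that loxinlp wraps:

package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
)

// gatewaysFor mirrors GetRouteNoHook: resolve which gateway(s) the kernel
// would use for a destination and return them as strings.
func gatewaysFor(dest string) ([]string, error) {
	dst := net.ParseIP(dest)
	if dst == nil {
		return nil, fmt.Errorf("invalid destination %q", dest)
	}
	rts, err := netlink.RouteGet(dst)
	if err != nil {
		return nil, err
	}
	var gws []string
	for _, rt := range rts {
		if rt.Gw != nil {
			gws = append(gws, rt.Gw.String())
		}
	}
	return gws, nil
}

func main() {
	gws, err := gatewaysFor("8.8.8.8")
	fmt.Println(gws, err)
}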
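
Firewall onDefault option: the new onDefault flag travels from swagger/models/handlers into FwOptArg.OnDefault and ruleFwOpt.onDflt. For SNAT rules it swaps the SnatFwMark tag for OnDfltSnatFwMark, registers the SNAT IP as a rule VIP, and defers datapath programming while the default HA instance is BACKUP; RulesSyncToClusterState (renamed from RuleVIPSyncToClusterState) replays these rules on every state change. A condensed sketch of the mark selection and HA gating, using the constants from rules.go:

package main

import "fmt"

// Constants as defined in pkg/loxinet/rules.go.
const (
	SnatFwMark       = 0x80000000 // Snat marker
	OnDfltSnatFwMark = 0x20000000 // OnDefault snat marker
)

// fwMarkFor mirrors the mark selection in AddFwRule: an onDefault SNAT rule
// is tagged with OnDfltSnatFwMark so the datapath can treat it as a
// default-route (egress) SNAT; everything else keeps the plain SNAT mark.
func fwMarkFor(ruleNum uint32, onDefault bool) uint32 {
	if onDefault {
		return ruleNum | OnDfltSnatFwMark
	}
	return ruleNum | SnatFwMark
}

// applyOnHAState mirrors the HA gating: onDefault rules are installed only
// while this node is not BACKUP and withdrawn when it becomes BACKUP
// (RulesSyncToClusterState replays this decision on every state change).
func applyOnHAState(ciState string, onDefault bool) string {
	if onDefault && ciState == "BACKUP" {
		return "remove"
	}
	return "create"
}

func main() {
	fmt.Printf("0x%x\n", fwMarkFor(5, true))
	fmt.Println(applyOnHAState("MASTER", true), applyOnHAState("BACKUP", true))
}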
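
Egress load-balancer rules: LbServiceArg and LoadbalanceEntryServiceArguments gain an egress flag. A wildcard egress service (0.0.0.0 or ::) is re-keyed onto the cluster gateway address, egress rules are kept out of the eBPF datapath (LB2DP returns early for them), and their endpoints are registered as egress cluster nodes via AddEPHost. Note that, as written, both the IPv4 and IPv6 branches key off mh.has.ClusterGw. A sketch of the re-keying, where clusterGw stands in for mh.has.ClusterGw:

package main

import (
	"fmt"
	"net"
)

// egressServiceKey mirrors the re-keying in AddLbRule/DeleteLbRule: a
// wildcard egress service is stored against the cluster gateway address
// instead of 0.0.0.0 or ::.
func egressServiceKey(servIP string, egress bool, clusterGw string) string {
	ip := net.ParseIP(servIP)
	if ip == nil {
		return ""
	}
	if ip.To4() != nil {
		svc := servIP + "/32"
		if svc == "0.0.0.0/32" && egress && clusterGw != "" {
			svc = clusterGw + "/32"
		}
		return svc
	}
	svc := servIP + "/128"
	if svc == "::/128" && egress && clusterGw != "" {
		svc = clusterGw + "/128"
	}
	return svc
}

func main() {
	fmt.Println(egressServiceKey("0.0.0.0", true, "10.252.1.254"))
	fmt.Println(egressServiceKey("20.20.20.1", false, "10.252.1.254"))
}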
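
pkg/loxinet/cluster.go: the new --clusterinterface option makes CIInit build a vxlan999 overlay on top of the cluster device, assign the node an address inside the cluster subnet (default 10.252.0.0/16, fd55:e81c:146f:66b5::/64 for IPv6) derived from the device's own address, assume .254 as the shared cluster gateway, and record the pre-existing default gateways via GetRouteNoHook; CIDestroy tears this down and restores those gateways on shutdown. A simplified IPv4-only sketch of the address and gateway derivation:

package main

import (
	"fmt"
	"net"
)

// deriveClusterAddrs sketches how CIInit maps a node's cluster-device IPv4
// address into the overlay subnet: the node keeps the last two octets of the
// device address, and the gateway is the .254 address in the same slot.
func deriveClusterAddrs(clusterCIDR string, ifIP net.IP) (node, gw net.IP, err error) {
	ip, _, err := net.ParseCIDR(clusterCIDR)
	if err != nil {
		return nil, nil, err
	}
	ip = ip.To4()
	ifIP = ifIP.To4()
	if ip == nil || ifIP == nil {
		return nil, nil, fmt.Errorf("ipv4 only in this sketch")
	}
	node = make(net.IP, len(ip))
	copy(node, ip)
	node[2] = ifIP[2] // keep the last two octets of the cluster-dev address
	node[3] = ifIP[3]

	gw = make(net.IP, len(node))
	copy(gw, node)
	gw[3] = 254 // shared cluster gateway, as assumed by CIInit
	return node, gw, nil
}

func main() {
	node, gw, _ := deriveClusterAddrs("10.252.0.0/16", net.ParseIP("192.168.80.9"))
	fmt.Println(node, gw) // 10.252.80.9 10.252.80.254
}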
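
CIAddClusterRoute drives egress fail-over by rewriting the default route: on a BACKUP node the default route is pointed at the egress VIP (reachable across the vxlan999 overlay, i.e. via the current MASTER), while on a MASTER or standalone node any such override is removed and the gateways recorded at startup (OGw/OGw6) are restored, which is also what CIDestroy does on exit. A condensed, illustrative reading of that decision logic, with route add/del strings standing in for the AddRouteNoHook/DelRouteNoHook calls:

package main

import "fmt"

// clusterRouteAction condenses CIAddClusterRoute: BACKUP nodes steer their
// default route at the egress VIP, everyone else makes sure the VIP is not
// the default gateway and falls back to the original gateways.
func clusterRouteAction(ciState, vip string, currentGws, originalGws []string) []string {
	has := func(gws []string, gw string) bool {
		for _, g := range gws {
			if g == gw {
				return true
			}
		}
		return false
	}
	if ciState == "BACKUP" {
		if !has(currentGws, vip) {
			return []string{"del default", "add default via " + vip}
		}
		return nil
	}
	// MASTER / NOT_DEFINED: remove the VIP override and restore the
	// gateways that were present at startup.
	if has(currentGws, vip) {
		out := []string{"del default"}
		for _, gw := range originalGws {
			out = append(out, "add default via "+gw)
		}
		return out
	}
	return nil
}

func main() {
	fmt.Println(clusterRouteAction("BACKUP", "10.252.80.254",
		[]string{"192.168.80.1"}, []string{"192.168.80.1"}))
}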
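
Egress cluster nodes: ClusterNodeMod/ClusterNode now carry an Egress flag (set from AddEPHost when an endpoint host is marked egress), and ClusterNode.DP turns such nodes into VXLAN peers of the cluster overlay via AddVxLANPeerNoHook/DelVxLANPeerNoHook instead of the usual CT-sync peer programming. AddVxLANPeerNoHook itself is not part of this diff; the sketch below shows the kind of netlink FDB append it is presumably built on, i.e. the equivalent of `bridge fdb append 00:00:00:00:00:00 dev vxlan999 dst <peer>`:

package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
	"golang.org/x/sys/unix"
)

// addVxlanPeer appends an all-zero-MAC FDB entry on the vxlan device that
// points at the peer's underlay address, making the peer a flood target of
// the cluster overlay.
func addVxlanPeer(vxlanName string, peerIP net.IP) error {
	link, err := netlink.LinkByName(vxlanName)
	if err != nil {
		return err
	}
	fdb := &netlink.Neigh{
		LinkIndex:    link.Attrs().Index,
		Family:       unix.AF_BRIDGE,
		State:        netlink.NUD_PERMANENT,
		Flags:        netlink.NTF_SELF,
		IP:           peerIP,
		HardwareAddr: net.HardwareAddr{0, 0, 0, 0, 0, 0},
	}
	return netlink.NeighAppend(fdb)
}

func main() {
	fmt.Println(addVxlanPeer("vxlan999", net.ParseIP("10.10.10.2")))
}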
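
pkg/utils/net.go: GetIfaceIpAddr is renamed to GetIfaceIPAddr, and a GetIfaceIP6Addr variant is added so the cluster code can pick up the device's IPv6 address (falling back to a derived one when none exists). A stdlib-only sketch of the IPv6 lookup:

package main

import (
	"errors"
	"fmt"
	"net"
)

// ifaceIP6Addr mirrors the new GetIfaceIP6Addr helper: return the first
// IPv6 address configured on an interface.
func ifaceIP6Addr(ifName string) (net.IP, error) {
	ief, err := net.InterfaceByName(ifName)
	if err != nil {
		return nil, err
	}
	addrs, err := ief.Addrs()
	if err != nil {
		return nil, err
	}
	for _, a := range addrs {
		ipn, ok := a.(*net.IPNet)
		if !ok {
			continue
		}
		if ipn.IP.To4() == nil && ipn.IP.To16() != nil {
			return ipn.IP, nil
		}
	}
	return nil, errors.New("no ipv6 address")
}

func main() {
	ip, err := ifaceIP6Addr("eth0")
	fmt.Println(ip, err)
}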
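
pkg/loxinet/port.go and layer3.go: PortAdd no longer hard-fails when a vxlan port's real device is missing; instead the Ifa DP sync reports an error until the real port exists, and PortTicker (now invoked from ZoneTicker) re-binds the real/overlay port pointers on each tick. The unconditional continue also means the older per-interface netlink rescan below it no longer runs. A stripped-down sketch of the re-binding pass, where the Port type is a stand-in for loxinet's:

package main

import "fmt"

// Port is a simplified stand-in for loxinet's Port, kept only to show the
// pointer re-binding PortTicker now performs.
type Port struct {
	Name     string
	Real     string // HInfo.Real
	RealPort *Port  // SInfo.PortReal
	OvlPort  *Port  // SInfo.PortOvl
}

// relinkPorts re-binds vxlan-style ports to their real device once it shows
// up (or is re-created), mirroring the new PortTicker loop.
func relinkPorts(portsByName map[string]*Port) {
	for _, p := range portsByName {
		if p.Real == "" {
			continue
		}
		rp := portsByName[p.Real]
		if rp == nil {
			fmt.Printf("port %s: real port %s still missing\n", p.Name, p.Real)
			continue
		}
		if rp.OvlPort == nil {
			rp.OvlPort = p
		}
		if p.RealPort != rp {
			p.RealPort = rp
		}
	}
}

func main() {
	eth0 := &Port{Name: "eth0"}
	vx := &Port{Name: "vxlan999", Real: "eth0"}
	relinkPorts(map[string]*Port{"eth0": eth0, "vxlan999": vx})
	fmt.Println(vx.RealPort.Name, eth0.OvlPort.Name)
}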