diff --git a/cicd/k8s-calico-ipvs3/Vagrantfile b/cicd/k8s-calico-ipvs3/Vagrantfile
new file mode 100644
index 000000000..59982ed7e
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/Vagrantfile
@@ -0,0 +1,94 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+require "yaml"
+settings = YAML.load_file "yaml/settings.yaml"
+
+workers = settings["nodes"]["workers"]["count"]
+loxilbs = (ENV['LOXILBS'] || "2").to_i
+
+Vagrant.configure("2") do |config|
+
+ if Vagrant.has_plugin?("vagrant-vbguest")
+ config.vbguest.auto_update = false
+ end
+ config.vm.define "host" do |host|
+ host.vm.hostname = 'host1'
+ host.vm.box = settings["software"]["cluster"]["box"]
+ host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
+ host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
+ host.vm.provision :shell, :path => "node_scripts/host.sh"
+ host.vm.provider :virtualbox do |vbox|
+ vbox.customize ["modifyvm", :id, "--memory", 2048]
+ vbox.customize ["modifyvm", :id, "--cpus", 1]
+ end
+ end
+
+ (1..loxilbs).each do |node_number|
+ config.vm.define "llb#{node_number}" do |loxilb|
+ loxilb.vm.box = settings["software"]["loxilb"]["box"]["name"]
+ loxilb.vm.box_version = settings["software"]["loxilb"]["box"]["version"]
+ loxilb.vm.hostname = "llb#{node_number}"
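+      # llb1 -> 192.168.80.252, llb2 -> 192.168.80.253; these addresses are
+      # referenced by bird_config/bird.conf and yaml/kube-loxilb.yml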
+ ip = node_number + 251
+ loxilb.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+ loxilb.vm.provision :shell, :path => "node_scripts/loxilb.sh"
+ loxilb.vm.provider :virtualbox do |vbox|
+ vbox.customize ["modifyvm", :id, "--memory", 6000]
+ vbox.customize ["modifyvm", :id, "--cpus", 4]
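+        # promiscuous mode so this NIC accepts frames for other VMs' MACs (forwarded LB traffic)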
+ vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+ end
+ end
+ end
+
+ config.vm.define "master" do |master|
+ master.vm.box = settings["software"]["cluster"]["box"]
+ master.vm.hostname = 'master'
+ master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
+ master.vm.provision "shell",
+ env: {
+ "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+ "ENVIRONMENT" => settings["environment"],
+ "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+ "OS" => settings["software"]["os"]
+ },
+ path: "node_scripts/common.sh"
+ master.vm.provision "shell",
+ env: {
+ "CALICO_VERSION" => settings["software"]["calico"],
+ "CONTROL_IP" => settings["network"]["control_ip"],
+ "POD_CIDR" => settings["network"]["pod_cidr"],
+ "SERVICE_CIDR" => settings["network"]["service_cidr"]
+ },
+ path: "node_scripts/master.sh"
+
+ master.vm.provider :virtualbox do |vbox|
+ vbox.customize ["modifyvm", :id, "--memory", 4096]
+ vbox.customize ["modifyvm", :id, "--cpus", 2]
+ vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+ end
+ end
+
+ (1..workers).each do |node_number|
+ config.vm.define "worker#{node_number}" do |worker|
+ worker.vm.box = settings["software"]["cluster"]["box"]
+ worker.vm.hostname = "worker#{node_number}"
+ ip = node_number + 200
+ worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+ worker.vm.provision "shell",
+ env: {
+ "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+ "ENVIRONMENT" => settings["environment"],
+ "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+ "OS" => settings["software"]["os"]
+ },
+ path: "node_scripts/common.sh"
+ worker.vm.provision "shell", path: "node_scripts/worker.sh"
+
+ worker.vm.provider :virtualbox do |vbox|
+ vbox.customize ["modifyvm", :id, "--memory", 4096]
+ vbox.customize ["modifyvm", :id, "--cpus", 2]
+ vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+ end
+ end
+ end
+end
diff --git a/cicd/k8s-calico-ipvs3/bird_config/bird.conf b/cicd/k8s-calico-ipvs3/bird_config/bird.conf
new file mode 100644
index 000000000..c2b55f649
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/bird_config/bird.conf
@@ -0,0 +1,224 @@
+# This is a basic configuration file, which contains boilerplate options and
+# some basic examples. It allows the BIRD daemon to start but will not cause
+# anything else to happen.
+#
+# Please refer to the BIRD User's Guide documentation, which is also available
+# online at http://bird.network.cz/ in HTML format, for more information on
+# configuring BIRD and adding routing protocols.
+
+# Configure logging
+#log syslog all;
+log "/var/log/bird.log" { debug, trace, info, remote, warning, error, auth, fatal, bug };
+
+# Set router ID. It is a unique identification of your router, usually one of
+# IPv4 addresses of the router. It is recommended to configure it explicitly.
+router id 192.168.80.9;
+
+# Turn on global debugging of all protocols (all messages or just selected classes)
+# debug protocols all;
+# debug protocols { events, states };
+
+# Turn on internal watchdog
+# watchdog warning 5 s;
+# watchdog timeout 30 s;
+
+# You can define your own constants
+# define my_asn = 65000;
+# define my_addr = 198.51.100.1;
+
+# Tables master4 and master6 are defined by default
+# ipv4 table master4;
+# ipv6 table master6;
+
+# Define more tables, e.g. for policy routing or as MRIB
+# ipv4 table mrib4;
+# ipv6 table mrib6;
+
+# The Device protocol is not a real routing protocol. It does not generate any
+# routes and it only serves as a module for getting information about network
+# interfaces from the kernel. It is necessary in almost any configuration.
+protocol device {
+}
+
+# The direct protocol is not a real routing protocol. It automatically generates
+# direct routes to all network interfaces. Can exist in as many instances as you
+# wish if you want to populate multiple routing tables with direct routes.
+protocol direct {
+ #disabled; # Disable by default
+ ipv4; # Connect to default IPv4 table
+ #ipv6; # ... and to default IPv6 table
+}
+
+# The Kernel protocol is not a real routing protocol. Instead of communicating
+# with other routers in the network, it performs synchronization of BIRD
+# routing tables with the OS kernel. One instance per table.
+protocol kernel {
+ ipv4 { # Connect protocol to IPv4 table by channel
+# table master4; # Default IPv4 table is master4
+# import all; # Import to table, default is import all
+	export all;	# Export to protocol; default is export none
+ };
+# learn; # Learn alien routes from the kernel
+# kernel table 10; # Kernel table to synchronize with (default: main)
+}
+
+# Another instance for IPv6, skipping default options
+protocol kernel {
+ ipv6 { export all; };
+}
+
+# Static routes (Again, there can be multiple instances, for different address
+# families and to disable/enable various groups of static routes on the fly).
+protocol static {
+ ipv4; # Again, IPv4 channel with default options
+
+# route 0.0.0.0/0 via 198.51.100.10;
+# route 192.0.2.0/24 blackhole;
+# route 10.0.0.0/8 unreachable;
+# route 10.2.0.0/24 via "eth0";
+# # Static routes can be defined with optional attributes
+# route 10.1.1.0/24 via 198.51.100.3 { rip_metric = 3; };
+# route 10.1.2.0/24 via 198.51.100.3 { ospf_metric1 = 100; };
+# route 10.1.3.0/24 via 198.51.100.4 { ospf_metric2 = 100; };
+}
+
+# Pipe protocol connects two routing tables. Beware of loops.
+# protocol pipe {
+# table master4; # No ipv4/ipv6 channel definition like in other protocols
+# peer table mrib4;
+# import all; # Direction peer table -> table
+# export all; # Direction table -> peer table
+# }
+
+# RIP example, both RIP and RIPng are supported
+# protocol rip {
+# ipv4 {
+# # Export direct, static routes and ones from RIP itself
+# import all;
+# export where source ~ [ RTS_DEVICE, RTS_STATIC, RTS_RIP ];
+# };
+# interface "eth*" {
+# update time 10; # Default period is 30
+# timeout time 60; # Default timeout is 180
+# authentication cryptographic; # No authentication by default
+# password "hello" { algorithm hmac sha256; }; # Default is MD5
+# };
+# }
+
+# OSPF example, both OSPFv2 and OSPFv3 are supported
+# protocol ospf v3 {
+# ipv6 {
+# import all;
+# export where source = RTS_STATIC;
+# };
+# area 0 {
+# interface "eth*" {
+# type broadcast; # Detected by default
+# cost 10; # Interface metric
+# hello 5; # Default hello period 10 is too long
+# };
+# interface "tun*" {
+# type ptp; # PtP mode, avoids DR selection
+# cost 100; # Interface metric
+# hello 5; # Default hello period 10 is too long
+# };
+# interface "dummy0" {
+# stub; # Stub interface, just propagate it
+# };
+# };
+#}
+
+# Define simple filter as an example for BGP import filter
+# See https://gitlab.labs.nic.cz/labs/bird/wikis/BGP_filtering for more examples
+# filter rt_import
+# {
+# if bgp_path.first != 64496 then accept;
+# if bgp_path.len > 64 then accept;
+# if bgp_next_hop != from then accept;
+# reject;
+# }
+
+# BGP example, explicit name 'uplink1' is used instead of default 'bgp1'
+# protocol bgp uplink1 {
+# description "My BGP uplink";
+# local 198.51.100.1 as 65000;
+# neighbor 198.51.100.10 as 64496;
+# hold time 90; # Default is 240
+# password "secret"; # Password used for MD5 authentication
+#
+# ipv4 { # regular IPv4 unicast (1/1)
+# import filter rt_import;
+# export where source ~ [ RTS_STATIC, RTS_BGP ];
+# };
+#
+# ipv6 { # regular IPv6 unicast (2/1)
+# import filter rt_import;
+# export filter { # The same as 'where' expression above
+# if source ~ [ RTS_STATIC, RTS_BGP ]
+# then accept;
+# else reject;
+# };
+# };
+#
+# ipv4 multicast { # IPv4 multicast topology (1/2)
+# table mrib4; # explicit IPv4 table
+# import filter rt_import;
+# export all;
+# };
+#
+# ipv6 multicast { # IPv6 multicast topology (2/2)
+# table mrib6; # explicit IPv6 table
+# import filter rt_import;
+# export all;
+# };
+#}
+
+# Template example. Using templates to define IBGP route reflector clients.
+# template bgp rr_clients {
+# local 10.0.0.1 as 65000;
+# neighbor as 65000;
+# rr client;
+# rr cluster id 1.0.0.1;
+#
+# ipv4 {
+# import all;
+# export where source = RTS_BGP;
+# };
+#
+# ipv6 {
+# import all;
+# export where source = RTS_BGP;
+# };
+# }
+#
+# protocol bgp client1 from rr_clients {
+# neighbor 10.0.1.1;
+# }
+#
+# protocol bgp client2 from rr_clients {
+# neighbor 10.0.2.1;
+# }
+#
+# protocol bgp client3 from rr_clients {
+# neighbor 10.0.3.1;
+# }
+#
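+# eBGP sessions from the host (AS 64512) to the two loxilb nodes provisioned
+# by the Vagrantfile (AS 64511, matching the --setBGP/--extBGPPeers arguments
+# in yaml/kube-loxilb.yml).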
+protocol bgp llb1 {
+ local as 64512;
+ neighbor 192.168.80.252 as 64511;
+
+ ipv4 {
+ import all;
+ export all;
+ };
+}
+
+protocol bgp llb2 {
+ local as 64512;
+ neighbor 192.168.80.253 as 64511;
+
+ ipv4 {
+ import all;
+ export all;
+ };
+}
+
diff --git a/cicd/k8s-calico-ipvs3/config.sh b/cicd/k8s-calico-ipvs3/config.sh
new file mode 100755
index 000000000..ed986fbfb
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/config.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+# Destroy any VirtualBox VMs left over from previous runs
+VMs=$(vagrant global-status | grep -i virtualbox)
+while IFS= read -r line; do
+  read -a vm <<< "$line"
+  cd "${vm[4]}" > /dev/null 2>&1
+  echo "Destroying ${vm[1]}"
+  vagrant destroy -f "${vm[1]}"
+  cd - > /dev/null 2>&1
+done <<< "$VMs"
+
+vagrant up
+
+for((i=1; i<=60; i++))
+do
+ fin=1
+ pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")
+
+  while IFS= read -r line; do
+    read -a pod <<< "$line"
+    if [[ ${pod[3]} != *"Running"* ]]; then
+      echo "${pod[1]} is not UP yet"
+      fin=0
+    fi
+  done <<< "$pods"
+  if [ $fin -eq 1 ]; then
+    break
+  fi
+ echo "Will try after 10s"
+ sleep 10
+done
+
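+# Accept gratuitous ARP on vboxnet1 so the host learns the service VIP announced by loxilb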
+sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1
+
+# Create the fullnat test services
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/udp_fullnat.yml' 2> /dev/null
diff --git a/cicd/k8s-calico-ipvs3/configs/config b/cicd/k8s-calico-ipvs3/configs/config
new file mode 100644
index 000000000..ff6dd4137
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/configs/config
@@ -0,0 +1,19 @@
+apiVersion: v1
+clusters:
+- cluster:
+ certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3hPREE0TURBeU4xb1hEVE16TURreE5UQTRNREF5TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTkVJCjJjVE83YWxyaStmS0dBaWZqZXBGWXpoMGdCWmZ6K1JnTFcxV1JsV1NZVnhzNFUvcThTMHhJZzhJWW9SS3F2ZWEKcmtNTlVYVVZtUXVUQ3kwclJzRE9hRlpGN2VjVC9ST3JQRjFvY0NZOVJyaWRJOWFnMlNFNU9HMnRkbStuZ0V0WQpmdElHYWdESnRwQzVLZmkvdjluYzF3RmxVSW14YStVeTllajhUeWlWNG8xbVFLTklNY3dXd2diWlZxV1dPOG1jCmhHckF2MUhMRnFVOUVNRmR4OFZXMW5SN01qeVJPTU8wTlJIQXBDV1A4SmEwQXJlaTFDcmJSSStHcG8xb2cyZlUKVXhxcXB1ZVNIVkFtOTJVb2VpNk9DTDRiWFV2TXViU3lMaXh1cjJUUzZXclZiZUtPSVNCZ3VHdHNHeC90T3M2YgpFT1VYRXZhTG5PTTVXcHZjUzhrQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZLU3VHT1ZuRnArYU5ZQkZKVUVZTDB5Rm5HNEtNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRm1iN0hqMEJ6TVBmRlZnd0ZYOQpKUWUyVkl0aDlaaWVSZEFaVkU2OWlnQkRMWldmOWo4TXRGTWRUQkM3SlNxeWpETXVURDNXdDJPWnNnTXZBZ0syCjVLcU40VGk4QysyWCtFa2JZUjNvOFJVemZON3ViNlFscUZNR1cwSzFrajlOYTlpZlBWRE1qZUY3dVpiYnFkcXcKRFlmdGczOW9tSjI5dEdCekFScmNRdkwzQXViN2lKOU5IL1dZcGFvcXZpaURGYnU1ODRObThpWjkvSXI2c1JCTQo2aExGakZiTGFCbC8vWmdsRjRKNWdOT1J5RGpxVUYwMkx0SXZRdG8ySTJEYm1GbVVuK2VsUTFGT1VFa0hJaWtVCmJFNEFTbUFoSnVvNHBQVlY1a0s2UGpBZDBWOVVDNkxZNFdnd2p4TllIdWM1eEhHRFYwM1BqQTdWOTJFYmxXM3UKTEJBPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ server: https://192.168.80.250:6443
+ name: kubernetes
+contexts:
+- context:
+ cluster: kubernetes
+ user: kubernetes-admin
+ name: kubernetes-admin@kubernetes
+current-context: kubernetes-admin@kubernetes
+kind: Config
+preferences: {}
+users:
+- name: kubernetes-admin
+ user:
+ client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJUFUvMFNDVUV6RkV3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1UZ3dPREF3TWpkYUZ3MHlOREE1TVRjd09EQXdNekJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXRSM25wbW5jMGVPaUtXeG8KSFN1alFpRXNHZXZSSHkwSTFMUkJuc0x0WS91ZE9EQ256WWplTDBFeGtzTllqR2xzTEUvOUxEbE9VY3BUd0dCZwpxVnRaYnBzNUo2RjNZeElVUkdLSUo1N3AzSklGNmNNa0dNbmorOFoxYlRmQ09wd3JWVUdaTUhlSmFwOWljWUZ0CncvdjRXUnkxQ09laWZLRDBRVU9lRVlXL00ySC9JWDhLbzErdGNINHF6RXpmZlRkVE9qZmFNM1NZSWxqMXZSRmoKZDhWeGFOMjBDWUFQa1QrcXJuaTNCV2FGKzl1cDZ1S0tKcmd0dmJvTkZia0lORy9xcTVsWVlhdy9ZYTN4ZGFmNApyeGM3bCtWdDAxU2Jndmx4UllmeWdrS3BEMzZqUWIvaFNkV0FHb28yOFFOYVMwS1JHMUJLaDVpcjIxV1dRMjI1CmxkQjVpd0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTa3JoamxaeGFmbWpXQVJTVkJHQzlNaFp4dQpDakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBYzR0ZnRIbTdUb2dYQm8xWm1hakxwWkRMZGdtdy9tNGcxeG0xCkhmVG9McC84YkloV2xOdHZxYzI1U1Z2ZEVyMFpveHRhdC80M1JvRTZBZHlxTUNqeU93QnQ1ejZQY3FqeUNQZDcKbDJBMjY0a2tpLy9DYjdYVnlOTWZGb0tpK1JHdlZuWENkQVBQWTE3U1dtNk1aSnJ2NGxaTTkwOXVjb0Npc1lOTwp0RTBVS0gzRHZaSlVGd2FCU0JZdTZPT0ZtTmJ5dlRnbE9KdzRWaW5rUTFTN0dOcTU3VHNSbDA4V1ZUZFI1bStJCkkvNGFBWk5IeklaMVNKWldia2tydDdmUmg3RGhZUFNiNVd3SjhhMi9KSzltN09GVHlFeEljUnIxUHNEQVd0VHIKd1Y4SDZ0b2VNK1ZjU2FTQVdrZHZhMklMWlZ5cU9sSWdKN0Exb1p2Z2QxUTBGemttWGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+ client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBdFIzbnBtbmMwZU9pS1d4b0hTdWpRaUVzR2V2Ukh5MEkxTFJCbnNMdFkvdWRPRENuCnpZamVMMEV4a3NOWWpHbHNMRS85TERsT1VjcFR3R0JncVZ0WmJwczVKNkYzWXhJVVJHS0lKNTdwM0pJRjZjTWsKR01uais4WjFiVGZDT3B3clZVR1pNSGVKYXA5aWNZRnR3L3Y0V1J5MUNPZWlmS0QwUVVPZUVZVy9NMkgvSVg4SwpvMSt0Y0g0cXpFemZmVGRUT2pmYU0zU1lJbGoxdlJGamQ4VnhhTjIwQ1lBUGtUK3FybmkzQldhRis5dXA2dUtLCkpyZ3R2Ym9ORmJrSU5HL3FxNWxZWWF3L1lhM3hkYWY0cnhjN2wrVnQwMVNiZ3ZseFJZZnlna0twRDM2alFiL2gKU2RXQUdvbzI4UU5hUzBLUkcxQktoNWlyMjFXV1EyMjVsZEI1aXdJREFRQUJBb0lCQUcrMW9EbU9OZnJuTWcvbQpvMWpLbjRtY09EMU1xOUZVejR1U0dwODNqNEFKbzNFUzRZenVERXlOSTJ4b1BHZHVacC8yUGl0V3RTM1JZMUxXCm53Z1dKZFVJdkhuckoxM2R1czVyRDV1UWNxSHZGdUtWejkvU05tSUdpRXJ6QjBOckszY01YTlgrQkszVTNMMHgKbldneU0zMlRMVDN1ME5PWjJzMmVUUkoxc2ZhWVFnOGN3S0tXQjJhUWwwSXE5T1o3RnAxTWpLTTlpSm5SdmNtRApmRDQrdTd0T3QrZ0pjSEcxRXhnUEJ1VnJBbVZzNTRaY2xydzNUUHVzQmt5SHJ3NnpwR1Vmak84cU15czhaSUJpCjVmR3pQNnVFb0xDUXplelhOb1JTWUppV3plWjFjYTQ1LzEvS05PNkhqL2R5aTFFbmFvdHk2ZWljRGh6Z09GTEQKWVo4S2FrRUNnWUVBdzQ0aWsrYTFFbXN1Nzc1RCtYRGxZc1Vjc2srRVkrMERlV1pCOHZtamttL1htd3l0aEk3YwpXOGplRkFJRnhGdDVBMjFmUm16Z25Xc2RXTytqVlNoTGtSS2MvZ2lwVDBYNjZaakxydDdHU3N0RVVLMDJQUW9FCm82YmtNTUVtQkcvRjN3V0ZaK0hlYmpBb1pRUXR1K0c0T2lIOVZ3Y1AzT0lVSFl5Wmp3MC8xanNDZ1lFQTdSbEsKVFVWL1BHbHY3SGZGc2w5MlVxVUtlWUJLZHlxM284L3VSVnBPUGM0NzVZdHY3cWs3ZmlLaFBoanZ6SUVRMjRxcwpJY21qQkJ6TWkzWDcvVUhiQ3ZKLzFGaDhKeWpwdzRYU0xiRExjT293MW4wRFhIa3p6MDZvbHUrV3JqRVNqcndQCk9KczNSWEFNRkFaSEo4Rnd1dkd3akF2VG93Y3pCamQxSnp6T3BQRUNnWUVBcTloZ2pHWUZvc3pycnlRYzZidUsKNkRQZEZ0aUh0NlFWRkg2RzM3VCtSL1hlbXRNV1BGR3FWVUg3M2NzMU5DdEorWGdJYklQTEVxcUpYSmtoaC9qawpCdlU3WG9WUCsyaThxTUJyNVJ6QlZnS3Y0dEdEd3pubGY4WnQ2bVloT1c3YmpETjIyeVlDYUFRTHkxRlR1eklNCnlaRUs1b28vSjFUSVpuc2lpaklUY2ZjQ2dZQnRsaG5jbXptUzM1em1xL2xKVjR3bDZMaHRyaGs4K1JhRjlXNUkKeWRPT0I0dHhya0FmREVNK0twMWlTVDVVeEhiWk1rQWJJY3BoOVRtcFgvQUhmMi9Sb0lIcTBMY1psblJ0ZGFYTwpqbitKZ2dUdXJqdG1lM0t4NThaT2RSQ3RGR3RpWTVPWmMweFpvTTlFVmg1L1JvNW5LWnlUTGtnMGV2aGZjRFNLCjNUVVowUUtCZ0JvblpOL1FVeVBDUFFhdHB3Wm5MYTBXaWNQd3Q2TEJ4N0ZWYm1UbUxhZG5hOVNTMDB6azlKbFkKSm1KdG9ralU2U0o1TjlyUEM0SStxNmIxTDFCRDRpM2ZWVGdvRzNGL1R4MWhLVk5JVndkU3ZOVy9GUnlRYlp0SgpGMUJSVlB6Ni9MckhYeU5rMW1HcmZiTFhlNWlyaFVaczgycVREMnkrM2VxZk50aHFuUlpNCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/cicd/k8s-calico-ipvs3/configs/join.sh b/cicd/k8s-calico-ipvs3/configs/join.sh
new file mode 100755
index 000000000..bda95b5f6
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/configs/join.sh
@@ -0,0 +1 @@
+kubeadm join 192.168.80.250:6443 --token xznj68.toygwobued5ivlwv --discovery-token-ca-cert-hash sha256:bfd28724fb202d1b1780fe03d9a265a5c9bd656b0574132b026d4ae770b8b8e1
diff --git a/cicd/k8s-calico-ipvs3/index1.html b/cicd/k8s-calico-ipvs3/index1.html
new file mode 100644
index 000000000..fe19c9655
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/index1.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>Welcome to tcp-ss-0!</title>
+</head>
+<body>
+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>
+</body>
+</html>
diff --git a/cicd/k8s-calico-ipvs3/index2.html b/cicd/k8s-calico-ipvs3/index2.html
new file mode 100644
index 000000000..47aaa6bdb
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/index2.html
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+<head>
+<title>Welcome to tcp-ss-1!</title>
+</head>
+<body>
+<h1>Welcome to nginx!</h1>
+<p>If you see this page, the nginx web server is successfully installed and
+working. Further configuration is required.</p>
+
+<p>For online documentation and support please refer to
+<a href="http://nginx.org/">nginx.org</a>.<br/>
+Commercial support is available at
+<a href="http://nginx.com/">nginx.com</a>.</p>
+
+<p><em>Thank you for using nginx.</em></p>
+</body>
+</html>
diff --git a/cicd/k8s-calico-ipvs3/input b/cicd/k8s-calico-ipvs3/input
new file mode 100644
index 000000000..6fb66a5e2
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/input
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/cicd/k8s-calico-ipvs3/node_scripts/common.sh b/cicd/k8s-calico-ipvs3/node_scripts/common.sh
new file mode 100644
index 000000000..b8634194f
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/node_scripts/common.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Common setup for all servers (Control Plane and Nodes)
+
+set -euxo pipefail
+
+# Variable Declaration
+
+# DNS Setting
+if [ ! -d /etc/systemd/resolved.conf.d ]; then
+ sudo mkdir /etc/systemd/resolved.conf.d/
+fi
+cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
+[Resolve]
+DNS=${DNS_SERVERS}
+EOF
+
+sudo systemctl restart systemd-resolved
+
+# disable swap
+sudo swapoff -a
+
+# keep swap off across reboots
+(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
+sudo apt-get update -y
+# Install CRI-O Runtime
+
+VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"
+
+# Create the .conf file to load the modules at bootup
+cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
+overlay
+br_netfilter
+EOF
+
+sudo modprobe overlay
+sudo modprobe br_netfilter
+
+# Set up required sysctl params; these persist across reboots
+cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables  = 1
+net.ipv4.ip_forward                 = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+EOF
+
+sudo sysctl --system
+
+# Add the CRI-O apt repositories for this OS and Kubernetes minor version
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
+EOF
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
+deb http://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/ /
+EOF
+
+curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+
+sudo apt-get update
+sudo apt-get install -y cri-o cri-o-runc
+
+# Pass any extra environment (e.g. proxy settings) through to the CRI-O service
+cat >> /etc/default/crio << EOF
+${ENVIRONMENT}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl enable crio --now
+
+echo "CRI runtime installed successfully"
+
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates curl
+curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
+
+echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+sudo apt-get update -y
+sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION"
+sudo apt-get update -y
+sudo apt-get install -y jq
+sudo apt-get install -y ipvsadm
+
+local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
+cat > /etc/default/kubelet << EOF
+KUBELET_EXTRA_ARGS=--node-ip=$local_ip
+${ENVIRONMENT}
+EOF
diff --git a/cicd/k8s-calico-ipvs3/node_scripts/host.sh b/cicd/k8s-calico-ipvs3/node_scripts/host.sh
new file mode 100755
index 000000000..056354353
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/node_scripts/host.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Install bird2 so the host can peer with the loxilb nodes over BGP
+sudo apt install bird2 --yes
+
+sleep 5
+
+sudo cp -f /vagrant/bird_config/bird.conf /etc/bird/bird.conf
+if [ ! -f /var/log/bird.log ]; then
+ sudo touch /var/log/bird.log
+fi
+sudo chown bird:bird /var/log/bird.log
+sudo service bird start
+
+echo "Host is up"
diff --git a/cicd/k8s-calico-ipvs3/node_scripts/loxilb.sh b/cicd/k8s-calico-ipvs3/node_scripts/loxilb.sh
new file mode 100644
index 000000000..8c7e2cea3
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/node_scripts/loxilb.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# Pick this VM's global IP, skipping the NAT (10.0.2.15) and 192.168.80.x management addresses
+export LOXILB_IP=$(ip a | grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+apt-get update
+apt-get install -y software-properties-common
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+apt-get update
+apt-get install -y docker-ce
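+# Run loxilb on the host network in a privileged container; -b enables its BGP speaker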
+docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest -b
diff --git a/cicd/k8s-calico-ipvs3/node_scripts/master.sh b/cicd/k8s-calico-ipvs3/node_scripts/master.sh
new file mode 100644
index 000000000..41793b5fa
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/node_scripts/master.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# Setup for Control Plane (Master) servers
+
+set -euxo pipefail
+
+NODENAME=$(hostname -s)
+
+sudo kubeadm config images pull
+
+echo "Preflight Check Passed: Downloaded All Required Images"
+
+#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap
+sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml
+
+mkdir -p "$HOME"/.kube
+sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config
+sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config
+
+# Save configs to the shared /vagrant location
+
+# For Vagrant re-runs, delete any existing configs before saving the new ones.
+
+config_path="/vagrant/configs"
+
+if [ -d $config_path ]; then
+ rm -f $config_path/*
+else
+ mkdir -p $config_path
+fi
+
+cp -i /etc/kubernetes/admin.conf $config_path/config
+touch $config_path/join.sh
+chmod +x $config_path/join.sh
+
+kubeadm token create --print-join-command > $config_path/join.sh
+
+# Install Calico Network Plugin
+
+curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O
+
+kubectl apply -f calico.yaml
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+EOF
+
+# Install Metrics Server
+
+kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml
+
+# Install loxilb
+kubectl apply -f /vagrant/yaml/kube-loxilb.yml
diff --git a/cicd/k8s-calico-ipvs3/node_scripts/worker.sh b/cicd/k8s-calico-ipvs3/node_scripts/worker.sh
new file mode 100644
index 000000000..a5754170b
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/node_scripts/worker.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Setup for Node servers
+
+set -euxo pipefail
+
+config_path="/vagrant/configs"
+
+/bin/bash $config_path/join.sh -v
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker
+EOF
diff --git a/cicd/k8s-calico-ipvs3/rmconfig.sh b/cicd/k8s-calico-ipvs3/rmconfig.sh
new file mode 100755
index 000000000..6cadc7e4e
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/rmconfig.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+vagrant destroy -f worker2
+vagrant destroy -f worker1
+vagrant destroy -f master
+vagrant destroy -f llb1
+vagrant destroy -f llb2
+vagrant destroy -f host
diff --git a/cicd/k8s-calico-ipvs3/validation.sh b/cicd/k8s-calico-ipvs3/validation.sh
new file mode 100755
index 000000000..2e49fad1c
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/validation.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+source ../common.sh
+echo k8s-calico-ipvs3
+
+if [ "$1" ]; then
+ KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+for((i=0; i<120; i++))
+do
+ extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-fullnat")
+ read -a strarr <<< "$extLB"
+ len=${#strarr[*]}
+ if [[ $((len)) -lt 6 ]]; then
+ echo "Can't find tcp-lb service"
+ sleep 1
+ continue
+ fi
+ if [[ ${strarr[3]} != *"none"* ]]; then
+ extIP="$(cut -d'-' -f2 <<<${strarr[3]})"
+ break
+ fi
+ echo "No external LB allocated"
+ sleep 1
+done
+
+## Give BGP routing updates time to propagate
+sleep 30
+
+vagrant ssh master -c 'kubectl cp /vagrant/index1.html tcp-ss-0:/usr/share/nginx/html/index.html'
+vagrant ssh master -c 'kubectl cp /vagrant/index2.html tcp-ss-1:/usr/share/nginx/html/index.html'
+
+echo "Service IP: $extIP"
+echo -e "\nEnd Points List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nSVC List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get svc' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nPod List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nLB List"
+echo "******************************************************************************"
+vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nEP List"
+echo "******************************************************************************"
+vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get ep -o wide' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nTEST RESULTS"
+echo "******************************************************************************"
+mode=( "onearm" "fullnat" "onearm-ss" "fullnat-ss")
+tcp_port=( 56002 57002 58002 59002)
+udp_port=( 56003 57003 58003 59003)
+sctp_port=( 56004 57004 58004 59004)
+
+code=0
+for ((i=0;i<=1;i++)); do
+out=$(curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]})
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+ echo -e "K8s-calico-ipvs3 TCP\t(${mode[i]})\t[OK]"
+else
+ echo -e "K8s-calico-ipvs3 TCP\t(${mode[i]})\t[FAILED]"
+ ## Dump some debug info
+ echo "llb1 lb-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+ echo "llb1 route-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+ code=1
+fi
+
+out=$(timeout 5 ../common/udp_client $extIP ${udp_port[i]})
+if [[ ${out} == *"Client"* ]]; then
+ echo -e "K8s-calico-ipvs3 UDP\t(${mode[i]})\t[OK]"
+else
+ echo -e "K8s-calico-ipvs3 UDP\t(${mode[i]})\t[FAILED]"
+ ## Dump some debug info
+ echo "llb1 lb-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+ echo "llb1 route-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+ code=1
+fi
+
+done
+
+for ((i=2;i<=3;i++)); do
+out=$(curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]})
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+ echo -e "K8s-calico-ipvs3 TCP\t(${mode[i]})\t[OK]"
+else
+ echo -e "K8s-calico-ipvs3 TCP\t(${mode[i]})\t[FAILED]"
+ ## Dump some debug info
+ echo "llb1 lb-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+ echo "llb1 route-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+ code=1
+fi
+
+out=$(timeout 5 ../common/udp_client $extIP ${udp_port[i]})
+if [[ ${out} == *"Client"* ]]; then
+ echo -e "K8s-calico-ipvs3 UDP\t(${mode[i]})\t[OK]"
+else
+ echo -e "K8s-calico-ipvs3 UDP\t(${mode[i]})\t[FAILED]"
+ ## Dump some debug info
+ echo "llb1 lb-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+ echo "llb1 route-info"
+ vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+ code=1
+fi
+
+done
+
+exit $code
diff --git a/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml
new file mode 100644
index 000000000..18405bd9b
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/kube-loxilb.yml
@@ -0,0 +1,134 @@
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: kube-loxilb
+ namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-loxilb
+rules:
+ - apiGroups:
+ - ""
+ resources:
+ - nodes
+ verbs:
+ - get
+ - watch
+ - list
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - watch
+ - list
+ - patch
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ - services
+ - services/status
+ verbs:
+ - get
+ - watch
+ - list
+ - patch
+ - update
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - watch
+ - list
+ - apiGroups:
+ - authentication.k8s.io
+ resources:
+ - tokenreviews
+ verbs:
+ - create
+ - apiGroups:
+ - authorization.k8s.io
+ resources:
+ - subjectaccessreviews
+ verbs:
+ - create
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+ name: kube-loxilb
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: kube-loxilb
+subjects:
+ - kind: ServiceAccount
+ name: kube-loxilb
+ namespace: kube-system
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: kube-loxilb
+ namespace: kube-system
+ labels:
+ app: loxilb
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: loxilb
+ template:
+ metadata:
+ labels:
+ app: loxilb
+ spec:
+ hostNetwork: true
+ tolerations:
+ - effect: NoSchedule
+ operator: Exists
+ # Mark the pod as a critical add-on for rescheduling.
+ - key: CriticalAddonsOnly
+ operator: Exists
+ - effect: NoExecute
+ operator: Exists
+ priorityClassName: system-node-critical
+ serviceAccountName: kube-loxilb
+ terminationGracePeriodSeconds: 0
+ containers:
+ - name: kube-loxilb
+ image: ghcr.io/loxilb-io/kube-loxilb:debug
+ imagePullPolicy: Always
+ command:
+ - /bin/kube-loxilb
+ args:
+ - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111
+ - --externalCIDR=20.20.20.1/32
+ #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24
+ #- --monitor
+ - --setBGP=64511
+ - --extBGPPeers=192.168.80.9:64512
+ #- --setRoles=0.0.0.0
+ #- --setLBMode=1
+ #- --config=/opt/loxilb/agent/kube-loxilb.conf
+ resources:
+ requests:
+ cpu: "100m"
+ memory: "50Mi"
+ limits:
+ cpu: "100m"
+ memory: "50Mi"
+ securityContext:
+ privileged: true
+ capabilities:
+ add: ["NET_ADMIN", "NET_RAW"]
diff --git a/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml
new file mode 100644
index 000000000..31afe601c
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/kubeadm-config.yaml
@@ -0,0 +1,69 @@
+apiVersion: kubeadm.k8s.io/v1beta3
+bootstrapTokens:
+- groups:
+ - system:bootstrappers:kubeadm:default-node-token
+ ttl: 24h0m0s
+ usages:
+ - signing
+ - authentication
+kind: InitConfiguration
+localAPIEndpoint:
+ advertiseAddress: 192.168.80.250
+ bindPort: 6443
+nodeRegistration:
+ imagePullPolicy: IfNotPresent
+ name: master
+ taints: null
+---
+apiVersion: kubeadm.k8s.io/v1beta3
+certificatesDir: /etc/kubernetes/pki
+kind: ClusterConfiguration
+apiServer:
+ timeoutForControlPlane: 4m0s
+ certSANs:
+ - 192.168.80.250
+controlPlaneEndpoint: 192.168.80.250:6443
+clusterName: kubernetes
+controllerManager: {}
+dns: {}
+etcd:
+ local:
+ dataDir: /var/lib/etcd
+imageRepository: registry.k8s.io
+kubernetesVersion: v1.27.5
+networking:
+ dnsDomain: cluster.local
+ podSubnet: 172.16.1.0/16
+ serviceSubnet: 172.17.1.0/18
+scheduler: {}
+---
+apiVersion: kubeproxy.config.k8s.io/v1alpha1
+bindAddress: 0.0.0.0
+clientConnection:
+ acceptContentTypes: ""
+ burst: 10
+ contentType: application/vnd.kubernetes.protobuf
+ kubeconfig: /var/lib/kube-proxy/kubeconfig.conf
+ qps: 5
+clusterCIDR: ""
+configSyncPeriod: 15m0s
+#featureGates: "SupportIPVSProxyMode=true"
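+# Run kube-proxy in IPVS mode; this is the configuration the k8s-calico-ipvs3 scenario exercises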
+mode: ipvs
+enableProfiling: false
+healthzBindAddress: 0.0.0.0:10256
+hostnameOverride: ""
+iptables:
+ masqueradeAll: false
+ masqueradeBit: 14
+ minSyncPeriod: 0s
+ syncPeriod: 30s
+ipvs:
+ excludeCIDRs: null
+ minSyncPeriod: 0s
+ scheduler: ""
+ syncPeriod: 30s
+kind: KubeProxyConfiguration
+metricsBindAddress: 127.0.0.1:10249
+nodePortAddresses: null
+oomScoreAdj: -999
+portRange: ""
diff --git a/cicd/k8s-calico-ipvs3/yaml/sctp.yml b/cicd/k8s-calico-ipvs3/yaml/sctp.yml
new file mode 100644
index 000000000..c9a7d4afd
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/sctp.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sctp-lb-default
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "default"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: sctp-default-test
+ ports:
+ - port: 55004
+ protocol: SCTP
+ targetPort: 9999
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sctp-default-test
+ labels:
+ what: sctp-default-test
+spec:
+ containers:
+ - name: sctp-default-test
+ image: ghcr.io/loxilb-io/alpine-socat:latest
+ command: [ "sh", "-c"]
+ args:
+ - while true; do
+ socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat";
+ sleep 20;
+ done;
+ ports:
+ - containerPort: 9999
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
diff --git a/cicd/k8s-calico-ipvs3/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipvs3/yaml/sctp_fullnat.yml
new file mode 100644
index 000000000..6b43037a5
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/sctp_fullnat.yml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sctp-lb-fullnat
+ annotations:
+ loxilb.io/num-secondary-networks: "2"
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "fullnat"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: sctp-fullnat-test
+ ports:
+ - port: 57004
+ protocol: SCTP
+ targetPort: 9999
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sctp-fullnat-test
+ labels:
+ what: sctp-fullnat-test
+spec:
+ containers:
+ - name: sctp-fullnat-test
+ image: loxilbio/sctp-darn:latest
+ imagePullPolicy: Always
+ #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"]
+ command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"]
+ ports:
+ - containerPort: 9999
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
diff --git a/cicd/k8s-calico-ipvs3/yaml/sctp_onearm.yml b/cicd/k8s-calico-ipvs3/yaml/sctp_onearm.yml
new file mode 100644
index 000000000..b4b736962
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/sctp_onearm.yml
@@ -0,0 +1,41 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: sctp-lb-onearm
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: sctp-onearm-test
+ ports:
+ - port: 56004
+ protocol: SCTP
+ targetPort: 9999
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: sctp-onearm-test
+ labels:
+ what: sctp-onearm-test
+spec:
+ containers:
+ - name: sctp-onearm-test
+ image: ghcr.io/loxilb-io/alpine-socat:latest
+ command: [ "sh", "-c"]
+ args:
+ - while true; do
+ socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat";
+ sleep 20;
+ done;
+ ports:
+ - containerPort: 9999
+ env:
+ - name: MY_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
diff --git a/cicd/k8s-calico-ipvs3/yaml/settings.yaml b/cicd/k8s-calico-ipvs3/yaml/settings.yaml
new file mode 100644
index 000000000..e5b02a60b
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/settings.yaml
@@ -0,0 +1,44 @@
+---
+# cluster_name is used to group the nodes in a folder within VirtualBox:
+cluster_name: Kubernetes Cluster
+# Uncomment to set environment variables for services such as crio and kubelet.
+# For example, configure the cluster to pull images via a proxy.
+# environment: |
+# HTTP_PROXY=http://my-proxy:8000
+# HTTPS_PROXY=http://my-proxy:8000
+# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03
+# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf.
+network:
+ iloxilb_ip: 192.168.80.253
+ oloxilb_ip: 192.168.90.253
+  # Worker IPs are assigned by the Vagrantfile as 192.168.80.(200+n); loxilb nodes use .252/.253.
+ control_ip: 192.168.80.250
+ dns_servers:
+ - 8.8.8.8
+ - 1.1.1.1
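+  # must match podSubnet/serviceSubnet in yaml/kubeadm-config.yaml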
+ pod_cidr: 172.16.1.0/16
+ service_cidr: 172.17.1.0/18
+nodes:
+ control:
+ cpu: 2
+ memory: 4096
+ workers:
+ count: 2
+ cpu: 1
+ memory: 2048
+# Mount additional shared folders from the host into each virtual machine.
+# Note that the project directory is automatically mounted at /vagrant.
+# shared_folders:
+# - host_path: ../images
+# vm_path: /vagrant/images
+software:
+ loxilb:
+ box:
+ name: sysnet4admin/Ubuntu-k8s
+ version: 0.7.1
+ cluster:
+ box: bento/ubuntu-22.04
+ calico: 3.26.0
+ kubernetes: 1.27.1-00
+ os: xUbuntu_22.04
diff --git a/cicd/k8s-calico-ipvs3/yaml/tcp.yml b/cicd/k8s-calico-ipvs3/yaml/tcp.yml
new file mode 100644
index 000000000..8c8983403
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/tcp.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: tcp-lb-default
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "default"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ what: tcp-default-test
+ ports:
+ - port: 55002
+ targetPort: 80
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: tcp-default-test
+ labels:
+ what: tcp-default-test
+spec:
+ containers:
+ - name: tcp-default-test
+ image: ghcr.io/loxilb-io/nginx:stable
+ ports:
+ - containerPort: 80
diff --git a/cicd/k8s-calico-ipvs3/yaml/tcp_deployment.yml b/cicd/k8s-calico-ipvs3/yaml/tcp_deployment.yml
new file mode 100644
index 000000000..2ba67f822
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/tcp_deployment.yml
@@ -0,0 +1,47 @@
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: tcp-lb-oa-ss
+spec:
+ selector:
+ matchLabels:
+ run: tcp-lb-oa-ss
+  serviceName: "tcp-lb-oa-ss"
+  replicas: 2
+ template:
+ metadata:
+ labels:
+ run: tcp-lb-oa-ss
+ spec:
+ containers:
+ - name: tcp-lb-oa-ss
+ image: ghcr.io/loxilb-io/nginx:stable
+ ports:
+ - containerPort: 80
+ volumeMounts:
+ - name: www
+ mountPath: /usr/share/nginx/html
+ volumeClaimTemplates:
+ - metadata:
+ name: www
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 1Gi
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: tcp-lb-oa-ss
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ run: tcp-lb-oa-ss
+ ports:
+ - port: 65002
+ targetPort: 80
+ type: LoadBalancer
diff --git a/cicd/k8s-calico-ipvs3/yaml/tcp_fullnat.yml b/cicd/k8s-calico-ipvs3/yaml/tcp_fullnat.yml
new file mode 100644
index 000000000..3303ac35e
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/tcp_fullnat.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: tcp-lb-fullnat
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "fullnat"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ what: tcp-fullnat-test
+ ports:
+ - port: 57002
+ targetPort: 80
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: tcp-fullnat-test
+ labels:
+ what: tcp-fullnat-test
+spec:
+ containers:
+ - name: tcp-fullnat-test
+ image: ghcr.io/loxilb-io/nginx:stable
+ ports:
+ - containerPort: 80
diff --git a/cicd/k8s-calico-ipvs3/yaml/tcp_onearm.yml b/cicd/k8s-calico-ipvs3/yaml/tcp_onearm.yml
new file mode 100644
index 000000000..b3d345483
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/tcp_onearm.yml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: tcp-lb-onearm
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ what: tcp-onearm-test
+ ports:
+ - port: 56002
+ targetPort: 80
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: tcp-onearm-test
+ labels:
+ what: tcp-onearm-test
+spec:
+ containers:
+ - name: tcp-onearm-test
+ image: ghcr.io/loxilb-io/nginx:stable
+ ports:
+ - containerPort: 80
diff --git a/cicd/k8s-calico-ipvs3/yaml/tcp_ss_oa.yml b/cicd/k8s-calico-ipvs3/yaml/tcp_ss_oa.yml
new file mode 100644
index 000000000..e4aa06fea
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/tcp_ss_oa.yml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: tcp-lb-oa-ss
+ labels:
+ app: tcp-lb-oa-ss
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ app: tcp-lb-oa-ss
+ ports:
+ - port: 58002
+ targetPort: 80
+ name: tcp-ss
+ type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: tcp-ss
+spec:
+ selector:
+ matchLabels:
+ app: tcp-lb-oa-ss
+ serviceName: "tcp-lb-oa-ss"
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: tcp-lb-oa-ss
+ spec:
+ terminationGracePeriodSeconds: 10
+ containers:
+ - name: tcp-lb-oa-ss
+ image: ghcr.io/loxilb-io/nginx:stable
+ ports:
+ - containerPort: 80
+ name: tcp-ss
diff --git a/cicd/k8s-calico-ipvs3/yaml/udp.yml b/cicd/k8s-calico-ipvs3/yaml/udp.yml
new file mode 100644
index 000000000..ac6ef997d
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/udp.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: udp-lb-default
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "default"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: udp-default-test
+ ports:
+ - port: 55003
+ protocol: UDP
+ targetPort: 33333
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: udp-default-test
+ labels:
+ what: udp-default-test
+spec:
+ containers:
+ - name: udp-default-test
+ image: ghcr.io/loxilb-io/udp-echo:latest
+ ports:
+ - containerPort: 33333
diff --git a/cicd/k8s-calico-ipvs3/yaml/udp_fullnat.yml b/cicd/k8s-calico-ipvs3/yaml/udp_fullnat.yml
new file mode 100644
index 000000000..67b729019
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/udp_fullnat.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: udp-lb-fullnat
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "fullnat"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: udp-fullnat-test
+ ports:
+ - port: 57003
+ protocol: UDP
+ targetPort: 33333
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: udp-fullnat-test
+ labels:
+ what: udp-fullnat-test
+spec:
+ containers:
+ - name: udp-fullnat-test
+ image: ghcr.io/loxilb-io/udp-echo:latest
+ ports:
+ - containerPort: 33333
diff --git a/cicd/k8s-calico-ipvs3/yaml/udp_onearm.yml b/cicd/k8s-calico-ipvs3/yaml/udp_onearm.yml
new file mode 100644
index 000000000..833187e73
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/udp_onearm.yml
@@ -0,0 +1,30 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: udp-lb-onearm
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ loadBalancerClass: loxilb.io/loxilb
+ externalTrafficPolicy: Local
+ selector:
+ what: udp-onearm-test
+ ports:
+ - port: 56003
+ protocol: UDP
+ targetPort: 33333
+ type: LoadBalancer
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: udp-onearm-test
+ labels:
+ what: udp-onearm-test
+spec:
+ containers:
+ - name: udp-onearm-test
+ image: ghcr.io/loxilb-io/udp-echo:latest
+ ports:
+ - containerPort: 33333
diff --git a/cicd/k8s-calico-ipvs3/yaml/udp_ss_oa.yml b/cicd/k8s-calico-ipvs3/yaml/udp_ss_oa.yml
new file mode 100644
index 000000000..514f2e9bd
--- /dev/null
+++ b/cicd/k8s-calico-ipvs3/yaml/udp_ss_oa.yml
@@ -0,0 +1,43 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: udp-lb-oa-ss
+ labels:
+ app: udp-lb-oa-ss
+ annotations:
+ loxilb.io/liveness: "yes"
+ loxilb.io/lbmode: "onearm"
+spec:
+ externalTrafficPolicy: Local
+ loadBalancerClass: loxilb.io/loxilb
+ selector:
+ app: udp-lb-oa-ss
+ ports:
+ - port: 58003
+ protocol: UDP
+ targetPort: 33333
+ name: udp-ss
+ type: LoadBalancer
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: udp-ss
+spec:
+ selector:
+ matchLabels:
+ app: udp-lb-oa-ss
+ serviceName: "udp-lb-oa-ss"
+ replicas: 2
+ template:
+ metadata:
+ labels:
+ app: udp-lb-oa-ss
+ spec:
+ terminationGracePeriodSeconds: 10
+ containers:
+ - name: udp-lb-oa-ss
+ image: ghcr.io/loxilb-io/udp-echo:latest
+ ports:
+ - containerPort: 33333
+ name: udp-ss