diff --git a/cicd/k3s-flannel-incluster-l2/README b/cicd/k3s-flannel-incluster-l2/README
new file mode 100644
index 000000000..ff714ec1e
--- /dev/null
+++ b/cicd/k3s-flannel-incluster-l2/README
@@ -0,0 +1,7 @@
+## Test Case Description
+
+This scenario will have a K3s (2 Master Nodes & 2 Worker Nodes) cluster with flannel CNI. LoxiLB will be running in in-cluster Active-Backup High Availability mode (in both the master nodes) but without state synchronization. Workloads will be spawned in all the cluster nodes.
+
+The client will be connected directly to the cluster over an L2 network. The Service CIDR will also be a virtual IP from the K3s cluster network.
+
+In in-cluster scenarios, it is advised to create LB services in either one-arm or fullnat mode for ease of connectivity.
diff --git a/cicd/k3s-flannel-incluster-l2/Vagrantfile b/cicd/k3s-flannel-incluster-l2/Vagrantfile
new file mode 100644
index 000000000..2132c4936
--- /dev/null
+++ b/cicd/k3s-flannel-incluster-l2/Vagrantfile
@@ -0,0 +1,60 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+workers = (ENV['WORKERS'] || "2").to_i
+box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
+box_version = "0.7.1"
+Vagrant.configure("2") do |config|
+  config.vm.box = "#{box_name}"
+  config.vm.box_version = "#{box_version}"
+
+  if Vagrant.has_plugin?("vagrant-vbguest")
+    config.vbguest.auto_update = false
+  end
+
+  config.vm.define "host" do |host|
+    host.vm.hostname = 'host1'
+    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
+    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
+    host.vm.provision :shell, :path => "host.sh"
+    host.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 2048]
+      vbox.customize ["modifyvm", :id, "--cpus", 1]
+    end
+  end
+
+  config.vm.define "master1" do |master|
+    master.vm.hostname = 'master1'
+    master.vm.network :private_network, ip: "192.168.90.10", :netmask => "255.255.255.0"
+    master.vm.network :private_network, ip: "192.168.80.10", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master1.sh"
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+  config.vm.define "master2" do |master|
+    master.vm.hostname = 'master2'
+    master.vm.network :private_network, ip: "192.168.90.11", :netmask => "255.255.255.0"
+    master.vm.network :private_network, ip: "192.168.80.11", :netmask => "255.255.255.0"
+    master.vm.provision :shell, :path => "master2.sh"
+    master.vm.provider :virtualbox do |vbox|
+      vbox.customize ["modifyvm", :id, "--memory", 8192]
+      vbox.customize ["modifyvm", :id, "--cpus", 4]
+    end
+  end
+
+  (1..workers).each do |node_number|
+    config.vm.define "worker#{node_number}" do |worker|
+      worker.vm.hostname = "worker#{node_number}"
+      ip = node_number + 100
+      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+      worker.vm.provision :shell, :path => "worker.sh"
+      worker.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 4096]
+        vbox.customize ["modifyvm", :id, "--cpus", 2]
+      end
+    end
+  end
+end
diff --git a/cicd/k3s-flannel-incluster-l2/config.sh b/cicd/k3s-flannel-incluster-l2/config.sh
new file mode 100755
index 000000000..b0cfb3651
--- /dev/null
+++ b/cicd/k3s-flannel-incluster-l2/config.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -L 1 vagrant destroy -f
+vagrant up
+#sudo ip route add
123.123.123.1 via 192.168.90.10 || true +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/tcp-onearm-ds.yml' +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/udp-onearm-ds.yml' +vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/sctp-onearm-ds.yml' diff --git a/cicd/k3s-flannel-incluster-l2/host.sh b/cicd/k3s-flannel-incluster-l2/host.sh new file mode 100755 index 000000000..32cc4275a --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/host.sh @@ -0,0 +1,3 @@ +sudo apt install lksctp-tools +sudo ip route add 123.123.123.0/24 via 192.168.90.10 +echo "Host is up" diff --git a/cicd/k3s-flannel-incluster-l2/host_validation.sh b/cicd/k3s-flannel-incluster-l2/host_validation.sh new file mode 100755 index 000000000..56b4fb193 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/host_validation.sh @@ -0,0 +1,53 @@ +#!/bin/bash +extIP=$(cat /vagrant/extIP) + +mode="onearm" +tcp_port=55001 +udp_port=55002 +sctp_port=55003 + +code=0 +echo Service IP: $extIP + +ip route list match $extIP | grep $extIP -A 2 + +echo -e "\n*********************************************" +echo "Testing Service" +echo "*********************************************" +for((i=0;i<20;i++)) +do + +out=$(curl -s --connect-timeout 10 http://$extIP:$tcp_port) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo -e "K3s-flannel-incluster-l2 TCP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 TCP\t($mode)\t[FAILED]" + code=1 +fi + +out=$(timeout 5 /vagrant/udp_client $extIP $udp_port) +if [[ ${out} == *"Client"* ]]; then + echo -e "K3s-flannel-incluster-l2 UDP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 UDP\t($mode)\t[FAILED]" + code=1 +fi + +sctp_darn -H 192.168.80.9 -h $extIP -p $sctp_port -s < /vagrant/input > output +#sleep 2 +exp="New connection, peer addresses +192.168.80.200:55003" + +res=`cat output | grep -A 1 "New connection, peer addresses"` +sudo rm -rf output +if [[ "$res" == "$exp" ]]; then + #echo $res + echo -e "K3s-flannel-incluster-l2 SCTP\t($mode)\t[OK]" +else + echo -e "K3s-flannel-incluster-l2 SCTP\t($mode)\t[FAILED]" + code=1 +fi + + +done +exit $code diff --git a/cicd/k3s-flannel-incluster-l2/input b/cicd/k3s-flannel-incluster-l2/input new file mode 100644 index 000000000..6fb66a5e2 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/input @@ -0,0 +1,6 @@ + + + + + + diff --git a/cicd/k3s-flannel-incluster-l2/k3s.yaml b/cicd/k3s-flannel-incluster-l2/k3s.yaml new file mode 100644 index 000000000..f18efb521 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/k3s.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkekNDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdGMyVnkKZG1WeUxXTmhRREUyT1RnM01UWTBPREV3SGhjTk1qTXhNRE14TURFME1USXhXaGNOTXpNeE1ESTRNREUwTVRJeApXakFqTVNFd0h3WURWUVFEREJock0zTXRjMlZ5ZG1WeUxXTmhRREUyT1RnM01UWTBPREV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFUcE0zVW11N0J3M1pVMzNwS3YrS0dwTHUwUXkvSllISUQrNVVqcWM4NGcKTnJudHZTcVdISmJEUExtWWhNVng5S3FiU0I3dU9HMmVvVGN2dzEwY1Z0RVRvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVWx4a2ZxcXp0dzhHMHcvb2VFb0EzCmZjejh0eDB3Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnT2hUamlVYnU4TDl2YmNISlpTTWFPR3FsWlgwZ205dm0KWncxZ1hBV0VvTFlDSVFDbWpJQ1FSRzJUWnhpdldJUVhrVERhekJLbHZJTWdPWkk3bFlCTTJvVFd6dz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + server: https://192.168.80.10:6443 + name: default +contexts: +- context: + cluster: default + user: default + name: default +current-context: default +kind: Config +preferences: {} 
+users: +- name: default + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJrRENDQVRlZ0F3SUJBZ0lJYUxFcUF6cEl5aGt3Q2dZSUtvWkl6ajBFQXdJd0l6RWhNQjhHQTFVRUF3d1kKYXpOekxXTnNhV1Z1ZEMxallVQXhOams0TnpFMk5EZ3hNQjRYRFRJek1UQXpNVEF4TkRFeU1Wb1hEVEkwTVRBegpNREF4TkRFeU1Wb3dNREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhGVEFUQmdOVkJBTVRESE41CmMzUmxiVHBoWkcxcGJqQlpNQk1HQnlxR1NNNDlBZ0VHQ0NxR1NNNDlBd0VIQTBJQUJBYnR3WEQvcWt2ajhrU0kKZlIzZXNWUGRDQnpyYU4zV1hrS3NvOTZhWnFBcUFiOHdkRlFPRnZIdTlJSEgyK2dEY0N0MXJOWC9TK1FNcFVlWgpmbEUremRtalNEQkdNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUtCZ2dyQmdFRkJRY0RBakFmCkJnTlZIU01FR0RBV2dCUWNNaW1IbFdCOTdjTE5YTW92OExpc2ZwemVvakFLQmdncWhrak9QUVFEQWdOSEFEQkUKQWlBSy9TVEJ0V0VJblpGNVF0Zkx1dVRQZ0pXZ3BvL2JCbThwNXhvTXRJN3JKd0lnRXZ3MkdOaVY5QmRtR1lLTwpmVk5lMlE2YVZwdW1hTTZ5eEFaZjdTRW1hV2c9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlRENDQVIyZ0F3SUJBZ0lCQURBS0JnZ3Foa2pPUFFRREFqQWpNU0V3SHdZRFZRUUREQmhyTTNNdFkyeHAKWlc1MExXTmhRREUyT1RnM01UWTBPREV3SGhjTk1qTXhNRE14TURFME1USXhXaGNOTXpNeE1ESTRNREUwTVRJeApXakFqTVNFd0h3WURWUVFEREJock0zTXRZMnhwWlc1MExXTmhRREUyT1RnM01UWTBPREV3V1RBVEJnY3Foa2pPClBRSUJCZ2dxaGtqT1BRTUJCd05DQUFRSzR3QzhyTGdXWmFMcWE0Yjh6NllLN0dkQTFxWUJjTHNhZ0R3TmNpcnQKaGN3SHVjUEJ2cTN2elN2STVsRGpua3VlenZUdmlydy9jR0doZGlIRGdwcVNvMEl3UURBT0JnTlZIUThCQWY4RQpCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVUhESXBoNVZnZmUzQ3pWektML0M0CnJINmMzcUl3Q2dZSUtvWkl6ajBFQXdJRFNRQXdSZ0loQU9EdW4vU09MT2w0MzhycmkyazdFWTV6bktXd2IzLzcKNVp3Z2pDUndaQkZsQWlFQXVsSjQwelFqT05SWXVVN3dNa29JQkEzNjRaR2FuaWdzaFdtd2JZTmZQVGs9Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K + client-key-data: LS0tLS1CRUdJTiBFQyBQUklWQVRFIEtFWS0tLS0tCk1IY0NBUUVFSUFJTUloU1BCSFFaN3BHY2ZxNEVsZVVQY0wxR0g1TEVyRVV0akNnRTNxTDdvQW9HQ0NxR1NNNDkKQXdFSG9VUURRZ0FFQnUzQmNQK3FTK1B5UkloOUhkNnhVOTBJSE90bzNkWmVRcXlqM3BwbW9Db0J2ekIwVkE0Vwo4ZTcwZ2NmYjZBTndLM1dzMWY5TDVBeWxSNWwrVVQ3TjJRPT0KLS0tLS1FTkQgRUMgUFJJVkFURSBLRVktLS0tLQo= diff --git a/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml new file mode 100644 index 000000000..b4c8dcbae --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/kube-loxilb.yml @@ -0,0 +1,132 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + dnsPolicy: 
ClusterFirstWithHostNet + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + #- --loxiURL=http://192.168.80.10:11111 + - --externalCIDR=192.168.80.200/32 + #- --setBGP=64512 + - --setRoles=0.0.0.0 + #- --monitor + #- --setBGP + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k3s-flannel-incluster-l2/loxilb.yml b/cicd/k3s-flannel-incluster-l2/loxilb.yml new file mode 100644 index 000000000..6fee9b7bd --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/loxilb.yml @@ -0,0 +1,59 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: loxilb-lb + namespace: kube-system +spec: + selector: + matchLabels: + app: loxilb-app + template: + metadata: + name: loxilb-lb + labels: + app: loxilb-app + spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + tolerations: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: "node-role.kubernetes.io/master" + operator: Exists + - key: "node-role.kubernetes.io/control-plane" + operator: Exists + containers: + - name: loxilb-app + image: "ghcr.io/loxilb-io/loxilb:latest" + imagePullPolicy: Always + command: [ "/root/loxilb-io/loxilb/loxilb", "--egr-hooks", "--blacklist=cni[0-9a-z]|veth.|flannel." 
] + ports: + - containerPort: 11111 + - containerPort: 179 + securityContext: + privileged: true + capabilities: + add: + - SYS_ADMIN +--- +apiVersion: v1 +kind: Service +metadata: + name: loxilb-lb-service + namespace: kube-system +spec: + clusterIP: None + selector: + app: loxilb-app + ports: + - name: loxilb-app + port: 11111 + targetPort: 11111 + protocol: TCP diff --git a/cicd/k3s-flannel-incluster-l2/master1.sh b/cicd/k3s-flannel-incluster-l2/master1.sh new file mode 100755 index 000000000..c6f0613d1 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/master1.sh @@ -0,0 +1,12 @@ +sudo su +export MASTER_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.90' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +curl -fL https://get.k3s.io | sh -s - server --node-ip=192.168.80.10 --disable servicelb --disable traefik --cluster-init external-hostname=192.168.80.10 --node-external-ip=192.168.80.10 --disable-cloud-controller +curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - +sleep 60 +echo $MASTER_IP > /vagrant/master-ip +cp /var/lib/rancher/k3s/server/node-token /vagrant/node-token +sed -i -e "s/127.0.0.1/${MASTER_IP}/g" /etc/rancher/k3s/k3s.yaml +cp /etc/rancher/k3s/k3s.yaml /vagrant/k3s.yaml +sudo kubectl apply -f /vagrant/loxilb.yml +sudo kubectl apply -f /vagrant/kube-loxilb.yml +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-incluster-l2/master2.sh b/cicd/k3s-flannel-incluster-l2/master2.sh new file mode 100755 index 000000000..5ec72af6e --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/master2.sh @@ -0,0 +1,10 @@ +sudo su +export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +export MASTER_ADDR=$(cat /vagrant/master-ip) +export NODE_TOKEN=$(cat /vagrant/node-token) + +#curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 --disable-cloud-controller -t ${NODE_TOKEN} +curl -fL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - server --server https://192.168.80.10:6443 --disable traefik --disable servicelb --node-ip=192.168.80.11 external-hostname=192.168.80.11 --node-external-ip=192.168.80.11 -t ${NODE_TOKEN} +curl -sfL https://github.com/loxilb-io/loxilb-ebpf/raw/main/kprobe/install.sh | sh - + +/vagrant/wait_ready.sh diff --git a/cicd/k3s-flannel-incluster-l2/node-token b/cicd/k3s-flannel-incluster-l2/node-token new file mode 100644 index 000000000..41447f218 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/node-token @@ -0,0 +1 @@ +K104ba03b7d623244660768d0475dbaab00b38a44cf3dbd7f8cfb749899d6917dfe::server:53d79e122c4fb6b54f104932e26995dd diff --git a/cicd/k3s-flannel-incluster-l2/rmconfig.sh b/cicd/k3s-flannel-incluster-l2/rmconfig.sh new file mode 100755 index 000000000..bd4b79e81 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/rmconfig.sh @@ -0,0 +1,7 @@ +#!/bin/bash +sudo ip route del 123.123.123.1 via 192.168.90.10 || true +vagrant destroy -f worker1 +vagrant destroy -f worker2 +vagrant destroy -f master1 +vagrant destroy -f master2 +vagrant destroy -f host diff --git a/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml new file mode 100644 index 000000000..793bab3ef --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/sctp-onearm-ds.yml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-onearm-svc + 
annotations: + loxilb.io/lbmode: "onearm" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: sctp-onearm-test + ports: + - port: 55003 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: sctp-onearm-ds + labels: + what: sctp-onearm-test +spec: + selector: + matchLabels: + what: sctp-onearm-test + template: + metadata: + labels: + what: sctp-onearm-test + spec: + containers: + - name: sctp-onearm-pod + image: loxilbio/sctp-darn:latest + command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 diff --git a/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml new file mode 100644 index 000000000..b1ea660d7 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/tcp-onearm-ds.yml @@ -0,0 +1,37 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-onearm-svc + annotations: + loxilb.io/lbmode: "onearm" + loxilb.io/liveness: "yes" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-onearm-test + ports: + - port: 55001 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: tcp-onearm-ds + labels: + what: tcp-onearm-test +spec: + selector: + matchLabels: + what: tcp-onearm-test + template: + metadata: + labels: + what: tcp-onearm-test + spec: + containers: + - name: tcp-onearm-pod + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml b/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml new file mode 100644 index 000000000..01bcf8e70 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/udp-onearm-ds.yml @@ -0,0 +1,38 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-onearm-svc + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: udp-onearm-test + ports: + - port: 55002 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: udp-onearm-ds + labels: + what: udp-onearm-test +spec: + selector: + matchLabels: + what: udp-onearm-test + template: + metadata: + labels: + what: udp-onearm-test + spec: + containers: + - name: udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k3s-flannel-incluster-l2/udp_client b/cicd/k3s-flannel-incluster-l2/udp_client new file mode 100755 index 000000000..b70cd81fc Binary files /dev/null and b/cicd/k3s-flannel-incluster-l2/udp_client differ diff --git a/cicd/k3s-flannel-incluster-l2/validation.sh b/cicd/k3s-flannel-incluster-l2/validation.sh new file mode 100755 index 000000000..0995fb89c --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/validation.sh @@ -0,0 +1,34 @@ +#!/bin/bash +source ../common.sh +echo k3s-flannel-cluster-l2 + +if [ "$1" ]; then + KUBECONFIG="$1" +fi + +# Set space as the delimiter +IFS=' ' + +sleep 5 +extIP="192.168.80.200" +echo $extIP +echo $extIP > extIP + +echo "******************************************************************************" +echo -e "\nSVC List" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get svc' 2> /dev/null +echo "******************************************************************************" +echo -e "\nCluster Info" +echo 
"******************************************************************************" +echo "******************************************************************************" +echo -e "\nPods" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get pods -A' 2> /dev/null +echo "******************************************************************************" +echo -e "\nNodes" +echo "******************************************************************************" +vagrant ssh master1 -c 'sudo kubectl get nodes' 2> /dev/null + +vagrant ssh host -c 'sudo /vagrant/host_validation.sh' 2> /dev/null +sudo rm extIP diff --git a/cicd/k3s-flannel-incluster-l2/wait_ready.sh b/cicd/k3s-flannel-incluster-l2/wait_ready.sh new file mode 100755 index 000000000..5ff06e373 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/wait_ready.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +function wait_cluster_ready { + Res=$(sudo kubectl get pods -A | + while IFS= read -r line; do + if [[ "$line" != *"Running"* && "$line" != *"READY"* ]]; then + echo "not ready" + return + fi + done) + if [[ $Res == *"not ready"* ]]; then + return 1 + fi + return 0 +} + +function wait_cluster_ready_full { + i=1 + nr=0 + for ((;;)) do + wait_cluster_ready + nr=$? + if [[ $nr == 0 ]]; then + echo "Cluster is ready" + break + fi + i=$(( $i + 1 )) + if [[ $i -ge 40 ]]; then + echo "Cluster is not ready.Giving up" + exit 1 + fi + echo "Cluster is not ready...." + sleep 10 + done +} + +wait_cluster_ready_full diff --git a/cicd/k3s-flannel-incluster-l2/worker.sh b/cicd/k3s-flannel-incluster-l2/worker.sh new file mode 100644 index 000000000..b03d55fb5 --- /dev/null +++ b/cicd/k3s-flannel-incluster-l2/worker.sh @@ -0,0 +1,12 @@ +sudo su +export WORKER_ADDR=$(ip a |grep global | grep -v '10.0.2.15' | grep '192.168.80' | awk '{print $2}' | cut -f1 -d '/') +export MASTER_ADDR=$(cat /vagrant/master-ip) +export NODE_TOKEN=$(cat /vagrant/node-token) +mkdir -p /etc/rancher/k3s +cp -f /vagrant/k3s.yaml /etc/rancher/k3s/k3s.yaml +curl -sfL https://get.k3s.io | K3S_TOKEN=${NODE_TOKEN} sh -s - agent --server https://192.168.80.10:6443 --node-ip=${WORKER_ADDR} --node-external-ip=${WORKER_ADDR} -t ${NODE_TOKEN} +#sudo kubectl apply -f /vagrant/loxilb-peer.yml +#sudo kubectl apply -f /vagrant/nginx.yml +#sudo kubectl apply -f /vagrant/udp.yml +#sudo kubectl apply -f /vagrant/sctp.yml +/vagrant/wait_ready.sh diff --git a/cicd/k8s-calico-ipvs3/README b/cicd/k8s-calico-ipvs3/README new file mode 100644 index 000000000..3b7d30181 --- /dev/null +++ b/cicd/k8s-calico-ipvs3/README @@ -0,0 +1,8 @@ +## Test Case Description + +This scenario will have K8s(1 Master Nodes & 2 Worker Nodes) cluster with Calico CNI in ipvs mode. LoxiLB will be running as external Service LB. Workloads will be spawned in all the cluster nodes. + +Client will be connected to the LoxiLB with L3 network. Client and LoxiLB will run BGP peering. LoxiLB will advertise the Service CIDR or VirtualIP to the client. +Service CIDR will also be a Virtual IP, different from the K8s cluster network. + +In scenarios where LoxiLB runs outside of the cluster, it is advised to create LB services in either one-arm or fullnat mode for ease of connectivity or else user has to resolve connectivity with either static routes or through BGP. 
diff --git a/cicd/k8s-calico-ipvs3/host_validation.sh b/cicd/k8s-calico-ipvs3/host_validation.sh new file mode 100755 index 000000000..4a3737540 --- /dev/null +++ b/cicd/k8s-calico-ipvs3/host_validation.sh @@ -0,0 +1,60 @@ +#!/bin/bash +extIP=$(cat /vagrant/extIP) + +mode="onearm" +tcp_port=56002 +udp_port=56003 +sctp_port=56004 + +code=0 +echo Service IP: $extIP + +numECMP=$(ip route list match $extIP | grep $extIP -A 2 | tail -n 2 | wc -l) + +ip route list match $extIP | grep $extIP -A 2 + +if [ $numECMP == "2" ]; then + echo "Host ECMP route [OK]" +else + echo "Host ECMP route [NOK]" +fi +echo -e "\n*********************************************" +echo "Testing Service" +echo "*********************************************" +for((i=0;i<20;i++)) +do + +out=$(curl -s --connect-timeout 10 http://$extIP:$tcp_port) +if [[ ${out} == *"Welcome to nginx"* ]]; then + echo -e "K8s-calico-ipvs3 TCP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 TCP\t($mode)\t[FAILED]" + code=1 +fi + +out=$(timeout 5 /vagrant/udp_client $extIP $udp_port) +if [[ ${out} == *"Client"* ]]; then + echo -e "K8s-calico-ipvs3 UDP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 UDP\t($mode)\t[FAILED]" + code=1 +fi + +sctp_darn -H 192.168.80.9 -h 20.20.20.1 -p 56004 -s < /vagrant/input > output +#sleep 2 +exp="New connection, peer addresses +20.20.20.1:56004" + +res=`cat output | grep -A 1 "New connection, peer addresses"` +sudo rm -rf output +if [[ "$res" == "$exp" ]]; then + #echo $res + echo -e "K8s-calico-ipvs3 SCTP\t($mode)\t[OK]" +else + echo -e "K8s-calico-ipvs3 SCTP\t($mode)\t[FAILED]" + code=1 +fi + + +done +exit $code diff --git a/cicd/k8s-calico-ipvs3/node_scripts/host.sh b/cicd/k8s-calico-ipvs3/node_scripts/host.sh index a552c006a..7e2726014 100755 --- a/cicd/k8s-calico-ipvs3/node_scripts/host.sh +++ b/cicd/k8s-calico-ipvs3/node_scripts/host.sh @@ -8,6 +8,6 @@ if [ ! -f /var/log/bird.log ]; then sudo touch /var/log/bird.log fi sudo chown bird:bird /var/log/bird.log -sudo service bird start +sudo service bird restart echo "Host is up" diff --git a/cicd/k8s-calico-ipvs3/udp_client b/cicd/k8s-calico-ipvs3/udp_client new file mode 100755 index 000000000..b70cd81fc Binary files /dev/null and b/cicd/k8s-calico-ipvs3/udp_client differ
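For reference, the k3s-flannel-incluster-l2 scenario added above is driven end to end by the scripts in this change; a typical manual run looks like the following sketch, executed from cicd/k3s-flannel-incluster-l2/.

```bash
# Usage sketch for cicd/k3s-flannel-incluster-l2 (CI drives the same scripts):
./config.sh        # destroy stale VirtualBox VMs, vagrant up, create the one-arm test services
./validation.sh    # dump svc/pod/node state and run host_validation.sh from the client VM
./rmconfig.sh      # tear down the host, master and worker VMs afterwards

# The in-cluster LoxiLB DaemonSet is pinned to the master nodes; the
# active/backup pair should appear as loxilb-lb pods in kube-system:
vagrant ssh master1 -c 'sudo kubectl get pods -n kube-system -o wide | grep loxilb'
```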