From 4889b179071c056d114d820f3c18b1545ec5974c Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sun, 17 Sep 2023 23:20:16 +0900 Subject: [PATCH 1/4] cicd:extended tests for k8s with calico/ipvs --- cicd/k8s-calico-ipvs2/Vagrantfile | 83 +++++++++++ cicd/k8s-calico-ipvs2/config.sh | 37 +++++ cicd/k8s-calico-ipvs2/configs/config | 19 +++ cicd/k8s-calico-ipvs2/configs/join.sh | 1 + cicd/k8s-calico-ipvs2/input | 6 + cicd/k8s-calico-ipvs2/node_scripts/common.sh | 93 ++++++++++++ cicd/k8s-calico-ipvs2/node_scripts/loxilb.sh | 9 ++ cicd/k8s-calico-ipvs2/node_scripts/master.sh | 56 ++++++++ cicd/k8s-calico-ipvs2/node_scripts/worker.sh | 18 +++ cicd/k8s-calico-ipvs2/rmconfig.sh | 6 + cicd/k8s-calico-ipvs2/validation.sh | 90 ++++++++++++ .../yaml/.kube-loxilb.yml.swp | Bin 0 -> 12288 bytes cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml | 134 ++++++++++++++++++ .../k8s-calico-ipvs2/yaml/kubeadm-config.yaml | 69 +++++++++ cicd/k8s-calico-ipvs2/yaml/sctp.yml | 41 ++++++ cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml | 39 +++++ cicd/k8s-calico-ipvs2/yaml/sctp_onearm.yml | 41 ++++++ cicd/k8s-calico-ipvs2/yaml/settings.yaml | 44 ++++++ cicd/k8s-calico-ipvs2/yaml/tcp.yml | 29 ++++ cicd/k8s-calico-ipvs2/yaml/tcp_fullnat.yml | 29 ++++ cicd/k8s-calico-ipvs2/yaml/tcp_onearm.yml | 29 ++++ cicd/k8s-calico-ipvs2/yaml/udp.yml | 30 ++++ cicd/k8s-calico-ipvs2/yaml/udp_fullnat.yml | 30 ++++ cicd/k8s-calico-ipvs2/yaml/udp_onearm.yml | 30 ++++ 24 files changed, 963 insertions(+) create mode 100644 cicd/k8s-calico-ipvs2/Vagrantfile create mode 100755 cicd/k8s-calico-ipvs2/config.sh create mode 100644 cicd/k8s-calico-ipvs2/configs/config create mode 100755 cicd/k8s-calico-ipvs2/configs/join.sh create mode 100644 cicd/k8s-calico-ipvs2/input create mode 100644 cicd/k8s-calico-ipvs2/node_scripts/common.sh create mode 100644 cicd/k8s-calico-ipvs2/node_scripts/loxilb.sh create mode 100644 cicd/k8s-calico-ipvs2/node_scripts/master.sh create mode 100644 cicd/k8s-calico-ipvs2/node_scripts/worker.sh create mode 100755 cicd/k8s-calico-ipvs2/rmconfig.sh create mode 100755 cicd/k8s-calico-ipvs2/validation.sh create mode 100644 cicd/k8s-calico-ipvs2/yaml/.kube-loxilb.yml.swp create mode 100644 cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml create mode 100644 cicd/k8s-calico-ipvs2/yaml/sctp.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/sctp_onearm.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/settings.yaml create mode 100644 cicd/k8s-calico-ipvs2/yaml/tcp.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/tcp_fullnat.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/tcp_onearm.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/udp.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/udp_fullnat.yml create mode 100644 cicd/k8s-calico-ipvs2/yaml/udp_onearm.yml diff --git a/cicd/k8s-calico-ipvs2/Vagrantfile b/cicd/k8s-calico-ipvs2/Vagrantfile new file mode 100644 index 000000000..41124f4d6 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/Vagrantfile @@ -0,0 +1,83 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +require "yaml" +settings = YAML.load_file "yaml/settings.yaml" + +workers = settings["nodes"]["workers"]["count"] +loxilbs = (ENV['LOXILBS'] || "2").to_i + +Vagrant.configure("2") do |config| + + if Vagrant.has_plugin?("vagrant-vbguest") + config.vbguest.auto_update = false + end + + (1..loxilbs).each do |node_number| + config.vm.define "llb#{node_number}" do |loxilb| + loxilb.vm.box = 
settings["software"]["loxilb"]["box"]["name"]
+      loxilb.vm.box_version = settings["software"]["loxilb"]["box"]["version"]
+      loxilb.vm.hostname = "llb#{node_number}"
+      ip = node_number + 251
+      loxilb.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+      loxilb.vm.provision :shell, :path => "node_scripts/loxilb.sh"
+      loxilb.vm.provider :virtualbox do |vbox|
+          vbox.customize ["modifyvm", :id, "--memory", 6000]
+          vbox.customize ["modifyvm", :id, "--cpus", 4]
+          vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+      end
+    end
+  end
+
+  config.vm.define "master" do |master|
+    master.vm.box = settings["software"]["cluster"]["box"]
+    master.vm.hostname = 'master'
+    master.vm.network :private_network, ip: settings["network"]["control_ip"], :netmask => "255.255.255.0"
+    master.vm.provision "shell",
+      env: {
+        "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+        "ENVIRONMENT" => settings["environment"],
+        "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+        "OS" => settings["software"]["os"]
+      },
+      path: "node_scripts/common.sh"
+    master.vm.provision "shell",
+      env: {
+        "CALICO_VERSION" => settings["software"]["calico"],
+        "CONTROL_IP" => settings["network"]["control_ip"],
+        "POD_CIDR" => settings["network"]["pod_cidr"],
+        "SERVICE_CIDR" => settings["network"]["service_cidr"]
+      },
+      path: "node_scripts/master.sh"
+
+    master.vm.provider :virtualbox do |vbox|
+        vbox.customize ["modifyvm", :id, "--memory", 4096]
+        vbox.customize ["modifyvm", :id, "--cpus", 2]
+        vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+    end
+  end
+
+  (1..workers).each do |node_number|
+    config.vm.define "worker#{node_number}" do |worker|
+      worker.vm.box = settings["software"]["cluster"]["box"]
+      worker.vm.hostname = "worker#{node_number}"
+      ip = node_number + 200
+      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
+      worker.vm.provision "shell",
+        env: {
+          "DNS_SERVERS" => settings["network"]["dns_servers"].join(" "),
+          "ENVIRONMENT" => settings["environment"],
+          "KUBERNETES_VERSION" => settings["software"]["kubernetes"],
+          "OS" => settings["software"]["os"]
+        },
+        path: "node_scripts/common.sh"
+      worker.vm.provision "shell", path: "node_scripts/worker.sh"
+
+      worker.vm.provider :virtualbox do |vbox|
+          vbox.customize ["modifyvm", :id, "--memory", 4096]
+          vbox.customize ["modifyvm", :id, "--cpus", 2]
+          vbox.customize ["modifyvm", :id, "--nicpromisc2", "allow-all"]
+      end
+    end
+  end
+end
diff --git a/cicd/k8s-calico-ipvs2/config.sh b/cicd/k8s-calico-ipvs2/config.sh
new file mode 100755
index 000000000..ed986fbfb
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/config.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+VMs=$(vagrant global-status | grep -i virtualbox)
+while IFS= read -r VM; do
+  read -a vm <<< "$VM"
+  cd ${vm[4]} >/dev/null 2>&1
+  echo "Destroying ${vm[1]}"
+  vagrant destroy -f ${vm[1]}
+  cd - >/dev/null 2>&1
+done <<< "$VMs"
+
+vagrant up
+
+for((i=1; i<=60; i++))
+do
+  fin=1
+  pods=$(vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null | grep -v "NAMESPACE")
+
+  while IFS= read -r pod_line; do
+    read -a pod <<< "$pod_line"
+    if [[ ${pod[3]} != *"Running"* ]]; then
+      echo "${pod[1]} is not UP yet"
+      fin=0
+    fi
+  done <<< "$pods"
+  if [ $fin == 1 ];
+  then
+    break;
+  fi
+  echo "Will try after 10s"
+  sleep 10
+done
+
+sudo sysctl net.ipv4.conf.vboxnet1.arp_accept=1
+
+#Create fullnat Service
+vagrant ssh master -c 'kubectl apply -f /vagrant/yaml/tcp_fullnat.yml' 2> /dev/null
+vagrant ssh master -c 'kubectl apply -f 
/vagrant/yaml/udp_fullnat.yml' 2> /dev/null diff --git a/cicd/k8s-calico-ipvs2/configs/config b/cicd/k8s-calico-ipvs2/configs/config new file mode 100644 index 000000000..62c73aab1 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/configs/config @@ -0,0 +1,19 @@ +apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3hOekUwTURNek4xb1hEVE16TURreE5ERTBNRE16TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS28wCmJDS0puTFRWOFVkbVNDSXczZU5PY0VJYXpteVFsYnZaNVR2OXRpMFNaeDFlMnJUZ1NBc1F3M2NiaWZIU1c1YWgKU2luRVJlS1dWemlvMkZKRWpTMDh5bEVDdGRMbE1JUXFaSlkvK2lGeldKWUhJZm1MRlJJdGxCdXBNNW1TVEZkMQpQbTlkVDdNNTJYUnhMYnRRY0ZDRjd2MXFqZURwM1RJT010Qko1b1BCT2xrVzF6SVduS2pEUUUxUkx2MGZqZmpuCm0rcW5nc0lvR0NjdFJzdU04cFcrSE83MW5hek9ITFVDK3RuU0Y3a0dEWnBkNlNuL1NIdFJYOVFDSklIMkZLVUMKODZONmhVMFovQTlmZEh1Q3ZMeXNuM0FyNzgrSDNyR0QzUTM1d0hIS0ZrdHZKdlExdUdCb3NVTllkM0drZStrMApBcnMraUtOREhPVmJTTEdpM1hVQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZKYXV5a3pFV05WSlpCWjRRc0plaE8va3dmM2lNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRCs2bWM2VW12akE3MzZOTGp0bQp3UHp4NlhnWjhvT01kMDJndDZMMWpBNXBqY21ERFc3MGgzamJrSVl6bXRLV29ZZGJzMllUTE9ZTWFnZTFFQVdvCnZuS1d4Q08wQ293UWo4ZmJLWWlUNTlidnFGZGdtT1VacTR1YXpqZXUvMWJHcXFHb0pzcW4ySjcvY0s0d3FRdm4KQ1M1cGFzNXpnVlhzY2tLK1I5SXRNak5hYlV0Qjl5UzUzS2pOZzV4UnV3eTBsVlZpaVo2d29DNEFQVGtqZStxLwpWUC8wdVRpZUhrajVaUnZMTGxlUm1zTnVyUC9vK0pkNDB5bDdOWjRmWjZzdDRVSGl4TkpZRld4VHZ0aDUzcjMwClArMWRxSlYxdXM3MXV0TmZvWW9PTGxRSUNLcTA5cmRNTm9tYTFoWTBkNStHWjhhYkxNelVoc0pxbXU2aUhPcXgKbDQ0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.80.250:6443 + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: kubernetes-admin + name: kubernetes-admin@kubernetes +current-context: kubernetes-admin@kubernetes +kind: Config +preferences: {} +users: +- name: kubernetes-admin + user: + client-certificate-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYVgxTFE5RzE5Sll3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1UY3hOREF6TXpkYUZ3MHlOREE1TVRZeE5EQXpOREJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXM5VzNkSWtPdHVEbmRNenYKWFhiQkhLTXFjclQzYlhMQ0tCakE2Z2d5anJRaVh2TDNsTThSQXliYXk5ZUpjUEVRcHkwKzFwM1JFN2UxU2lYNgpKaGFvN0RaYUZRMm82TjQwWlNsMDl0d29LM0xXNU9pTG1udGVveWpSdStRMmF4RnpSWllZNVorOVY5ZWwxd3dKClYraUUrTGp6LzJJZ2hhSm92ejlHRU8rRElGYzZ1TTlZOWRTdlAvY0tRMXFncnR6RUppUDZuNzVRNGRKMVhCRG0Kb2pzZXU2aXBIdC90TXN1NEpKZW1EL0l6eHRlWHE1SXd5QmRNRkxheEcxb2xONGllMUVYY2NKRnBldTJ5amRzeQpvWE5sd1pBNkxtQTVJZkZLS1djRjFXMVEwT2ltU0hCNld6eldYUkpOMWl0SW9lc2NzaW5qUDhDRlNVWmZXSlBjCm9JL25Od0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTV3JzcE14RmpWU1dRV2VFTENYb1R2NU1IOQo0akFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBR3M3ajY2R3RBcGU4OVB1WDhNWU9sbVZSb0FJTmc4blp6MzNhCmM3c25lR3J4Y05Ma1FTa3h3TzNET1RJMjdPRFJuT0RRVXZZOEdEcG1qQjZNTUxIbDRLQlZ0dUUvRFlUd01IemcKZXRESmtnU3B4Rk1RTTN0ZzQzRlNiQ0FhVTFYaDUrMWgvZTBJY0Jqak94VHpQdnN0Ym1jemlBZWkrOTVhWTNOTgpMYkVQV3NoNmlCN2NuekZIcGhEWEhCaVcxMDVzbzk3VUFRc3Z3ZnoxREdJb2dVKzgrOXBVWWFYNkJqV0tGSEMrClpkZWhxMlZ3amFCaklvekhxMXg2dGVFV0N1RFhEc0tUeXIxUldBSXIxblEySWRaNWQ0bmZrRitrY3BjdHdyeUgKbFlMVTlHZVZLU1YyUC9QcTZhakQ5Vjg1MnpYbXBZckNQZmY3RUVFb0tPSnhIM2RQQ0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBczlXM2RJa090dURuZE16dlhYYkJIS01xY3JUM2JYTENLQmpBNmdneWpyUWlYdkwzCmxNOFJBeWJheTllSmNQRVFweTArMXAzUkU3ZTFTaVg2SmhhbzdEWmFGUTJvNk40MFpTbDA5dHdvSzNMVzVPaUwKbW50ZW95alJ1K1EyYXhGelJaWVk1Wis5VjllbDF3d0pWK2lFK0xqei8ySWdoYUpvdno5R0VPK0RJRmM2dU05WQo5ZFN2UC9jS1ExcWdydHpFSmlQNm43NVE0ZEoxWEJEbW9qc2V1NmlwSHQvdE1zdTRKSmVtRC9Jenh0ZVhxNUl3CnlCZE1GTGF4RzFvbE40aWUxRVhjY0pGcGV1MnlqZHN5b1hObHdaQTZMbUE1SWZGS0tXY0YxVzFRME9pbVNIQjYKV3p6V1hSSk4xaXRJb2VzY3NpbmpQOENGU1VaZldKUGNvSS9uTndJREFRQUJBb0lCQUFkczRhQk1NSEZZby9YcwpxRTJCbzhPYzhSbS9ZeC8veHdpRXpTSmNxRW5BSWxNdFVRK3g1US91UU1aOTVjem1CenYvNEVKWVRkTzBtQ1d4Cm43eUczdjZ4MTlKYXVBaTFkQkJoUDlVWmpnY05QTlpDeTlEYkd6TTljanVCMXNWZytSTjhlUmF5RzgxT3VDaEMKOVBmT1AvOU5FN1lOeFMzL1lMZlUwdjNNQmV3RjNFcW1vQjQ0Vkl0bmxNVWNIVmQ0TDljNWpYVWpsRTltTGlJKwpka2FYeVdsbE9GUWVOWEtDTGxZYjh5U1dIMXNENXMxMDMyRzFwZDlXWTB5MnpUN2YxcUU5R0ppMHprL2VxM0tUCjlvR3Irb1VIQS9sWnRENU1LMDdFc21zUmNCVUtLcjFmZ0xUWEVRQUZpbGlsSkRCR3RPWUc1VStaTjhCOGtJa3QKR1BpSWlpRUNnWUVBM1ZJR2dYVEJuUjdQUVk2MXYrYzJsdXFNRGI4MXlJc3RmUDhaTDZJYnVjNVhCemhIVFV1RApEMEtJajhBU1JIRWN3c3BhdWN2STUxN0thS3RaYk9vYVNjelZub2J0QjJrbno5QnhkMSttUGlDVWxrZ0pjWWlJCnhBSUt3MVBVQ2JoekV6bmtNeDVaNUlSbHBYc3M1NEpQdVpEN2dVVXJGLzV6R0tkSitUcDNNdmtDZ1lFQTBBT04KVzM0Qk5udVY0eUVxbXpDMGJzRTdodFB5aXJTaWpmekxBeWJuazcwek1pUUowQlY4YkU4RUx3dzFGYUwvVjY4YgpZZTJqeENzRTBlTnBLczM5bkVKTzlPV0pPdGJXQ0hhMFhkS0FJT2xPQ21xZ2E2UzR5VHY3MXc3RExGOHpMdEhKCjgvUVAvZHBUZ1Z4ZDJlVGVFU0gxSFhBNHhxQ0IwY2twNWVTNVI2OENnWUFLOUlCMTcvMlZkV3owNWNlQlZFU1QKZTlDcTdZZ1FBVE9qT0UxYTJqOWRGeVdmcWQxOXFmczJ5aWhRVlVXU05iY2l2VGw0Y09uazZnQkM1aGhyRjd0dApZOEkvTXBhUkExZUJKZVEzOEhnZUl5YTN1YUVlQ1pBYmc2NWN0TTJPSFo4MG1Hei9Tc1ZRVm0xaG41TTljbW5YCjJhRlphTDhWVlBjRmhXSlgvOWplb1FLQmdCVW9aMVpwQ0tLTUM0RGd2TDZ3UTRvSWFZK2JPMDVVZFR4UC9DNUQKNU1Zb05tN2hMNFpVeVNVVEhHL1VwQW1oWEt1RWlocDUyM0IybDdUcGxxYmQ4Ty9WNXNZT0d0azdST1h6ZlFFVwpDaVpPMUx0R2dTNnh3SUhuY2dRNzRpby9QS3VFZHpBeklQVkhJYnRFczk4WDRtYUh0N0RZMjAySXdldWV0SDVICnQzak5Bb0dBTVNlQ2ZzamY2bkNBNng1M29GeXNyc1ZEUk9sS04zSnl
DeGt1dk1RVGhYTzlNMWNMdU9kUDhPVWMKMjdIMHE0SkZzZHFCWWRWaUFDS3kxUXkwU0oxV2dEZTRiTU1IS3l0SlZhMU9vYlFjb2hvbnY1UWJtSitURDI3QgpqMVRTQzV1RWYzMm9saE5pVUUwajZRRWxmNTJBeTNoK0JuQXBPRFNwQ01SRnJ5dDk3WFE9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
diff --git a/cicd/k8s-calico-ipvs2/configs/join.sh b/cicd/k8s-calico-ipvs2/configs/join.sh
new file mode 100755
index 000000000..123e57e98
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/configs/join.sh
@@ -0,0 +1 @@
+kubeadm join 192.168.80.250:6443 --token hd7r6h.lwlhrhraltch29am --discovery-token-ca-cert-hash sha256:069ecfbe37f380fa0ca3a0a3a2a1a1c4d42e9b5b38e6757aec0bf2cf3366bb70
diff --git a/cicd/k8s-calico-ipvs2/input b/cicd/k8s-calico-ipvs2/input
new file mode 100644
index 000000000..6fb66a5e2
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/input
@@ -0,0 +1,6 @@
+
+
+
+
+
+
diff --git a/cicd/k8s-calico-ipvs2/node_scripts/common.sh b/cicd/k8s-calico-ipvs2/node_scripts/common.sh
new file mode 100644
index 000000000..b8634194f
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/node_scripts/common.sh
@@ -0,0 +1,93 @@
+#!/bin/bash
+#
+# Common setup for all servers (Control Plane and Nodes)
+
+set -euxo pipefail
+
+# Variable Declaration
+
+# DNS Setting
+if [ ! -d /etc/systemd/resolved.conf.d ]; then
+  sudo mkdir /etc/systemd/resolved.conf.d/
+fi
+cat <<EOF | sudo tee /etc/systemd/resolved.conf.d/dns_servers.conf
+[Resolve]
+DNS=${DNS_SERVERS}
+EOF
+
+sudo systemctl restart systemd-resolved
+
+# disable swap
+sudo swapoff -a
+
+# keeps the swap off during reboot
+(crontab -l 2>/dev/null; echo "@reboot /sbin/swapoff -a") | crontab - || true
+sudo apt-get update -y
+# Install CRI-O Runtime
+
+VERSION="$(echo ${KUBERNETES_VERSION} | grep -oE '[0-9]+\.[0-9]+')"
+
+# Create the .conf file to load the modules at bootup
+cat <<EOF | sudo tee /etc/modules-load.d/crio.conf
+overlay
+br_netfilter
+EOF
+
+sudo modprobe overlay
+sudo modprobe br_netfilter
+
+# Set up required sysctl params; these persist across reboots.
+cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-cri.conf
+net.bridge.bridge-nf-call-iptables  = 1
+net.ipv4.ip_forward                 = 1
+net.bridge.bridge-nf-call-ip6tables = 1
+EOF
+
+sudo sysctl --system
+
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
+deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /
+EOF
+cat <<EOF | sudo tee /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list
+deb http://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/ /
+EOF
+
+curl -L https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable:cri-o:$VERSION/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | sudo apt-key --keyring /etc/apt/trusted.gpg.d/libcontainers.gpg add -
+
+sudo apt-get update
+sudo apt-get install -y cri-o cri-o-runc
+
+cat >> /etc/default/crio << EOF
+${ENVIRONMENT}
+EOF
+sudo systemctl daemon-reload
+sudo systemctl enable crio --now
+
+echo "CRI runtime installed successfully"
+
+sudo apt-get update
+sudo apt-get install -y apt-transport-https ca-certificates curl
+curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg
+
+echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+sudo apt-get update -y
+sudo apt-get install -y kubelet="$KUBERNETES_VERSION" kubectl="$KUBERNETES_VERSION" kubeadm="$KUBERNETES_VERSION"
+sudo apt-get update -y
+sudo apt-get install -y jq
+sudo apt-get install -y ipvsadm
+
+local_ip="$(ip --json a s | jq -r '.[] | if .ifname == "eth1" then .addr_info[] | if .family == "inet" then .local else empty end else empty end')"
+cat > /etc/default/kubelet << EOF
+KUBELET_EXTRA_ARGS=--node-ip=$local_ip
+${ENVIRONMENT}
+EOF
diff --git a/cicd/k8s-calico-ipvs2/node_scripts/loxilb.sh b/cicd/k8s-calico-ipvs2/node_scripts/loxilb.sh
new file mode 100644
index 000000000..6df67208f
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/node_scripts/loxilb.sh
@@ -0,0 +1,9 @@
+export LOXILB_IP=$(ip a |grep global | grep -v '10.0.2.15' | grep -v '192.168.80' | awk '{print $2}' | cut -f1 -d '/')
+
+apt-get update
+apt-get install -y software-properties-common
+curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
+apt-get update
+apt-get install -y docker-ce
+docker run -u root --cap-add SYS_ADMIN --restart unless-stopped --privileged -dit -v /dev/log:/dev/log --net=host --name loxilb ghcr.io/loxilb-io/loxilb:latest
diff --git a/cicd/k8s-calico-ipvs2/node_scripts/master.sh b/cicd/k8s-calico-ipvs2/node_scripts/master.sh
new file mode 100644
index 000000000..41793b5fa
--- /dev/null
+++ 
b/cicd/k8s-calico-ipvs2/node_scripts/master.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+# Setup for Control Plane (Master) servers
+
+set -euxo pipefail
+
+NODENAME=$(hostname -s)
+
+sudo kubeadm config images pull
+
+echo "Preflight Check Passed: Downloaded All Required Images"
+
+#sudo kubeadm init --apiserver-advertise-address=$CONTROL_IP --apiserver-cert-extra-sans=$CONTROL_IP --pod-network-cidr=$POD_CIDR --service-cidr=$SERVICE_CIDR --node-name "$NODENAME" --ignore-preflight-errors Swap
+sudo kubeadm init --ignore-preflight-errors Swap --config /vagrant/yaml/kubeadm-config.yaml
+
+mkdir -p "$HOME"/.kube
+sudo cp -i /etc/kubernetes/admin.conf "$HOME"/.kube/config
+sudo chown "$(id -u)":"$(id -g)" "$HOME"/.kube/config
+
+# Save configs to the shared /vagrant location
+
+# For Vagrant re-runs, delete any existing configs there before saving the new configuration.
+
+config_path="/vagrant/configs"
+
+if [ -d $config_path ]; then
+  rm -f $config_path/*
+else
+  mkdir -p $config_path
+fi
+
+cp -i /etc/kubernetes/admin.conf $config_path/config
+touch $config_path/join.sh
+chmod +x $config_path/join.sh
+
+kubeadm token create --print-join-command > $config_path/join.sh
+
+# Install Calico Network Plugin
+
+curl https://raw.githubusercontent.com/projectcalico/calico/v${CALICO_VERSION}/manifests/calico.yaml -O
+
+kubectl apply -f calico.yaml
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+EOF
+
+# Install Metrics Server
+
+kubectl apply -f https://raw.githubusercontent.com/techiescamp/kubeadm-scripts/main/manifests/metrics-server.yaml
+
+# Install loxilb
+kubectl apply -f /vagrant/yaml/kube-loxilb.yml
diff --git a/cicd/k8s-calico-ipvs2/node_scripts/worker.sh b/cicd/k8s-calico-ipvs2/node_scripts/worker.sh
new file mode 100644
index 000000000..a5754170b
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/node_scripts/worker.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Setup for Node servers
+
+set -euxo pipefail
+
+config_path="/vagrant/configs"
+
+/bin/bash $config_path/join.sh -v
+
+sudo -i -u vagrant bash << EOF
+whoami
+mkdir -p /home/vagrant/.kube
+sudo cp -i $config_path/config /home/vagrant/.kube/
+sudo chown 1000:1000 /home/vagrant/.kube/config
+NODENAME=$(hostname -s)
+kubectl label node $(hostname -s) node-role.kubernetes.io/worker=worker
+EOF
diff --git a/cicd/k8s-calico-ipvs2/rmconfig.sh b/cicd/k8s-calico-ipvs2/rmconfig.sh
new file mode 100755
index 000000000..6cadc7e4e
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/rmconfig.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+vagrant destroy -f worker2
+vagrant destroy -f worker1
+vagrant destroy -f master
+vagrant destroy -f llb1
+vagrant destroy -f llb2
diff --git a/cicd/k8s-calico-ipvs2/validation.sh b/cicd/k8s-calico-ipvs2/validation.sh
new file mode 100755
index 000000000..ad5bb5a5e
--- /dev/null
+++ b/cicd/k8s-calico-ipvs2/validation.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+source ../common.sh
+echo k8s-calico-ipvs2
+
+if [ "$1" ]; then
+  KUBECONFIG="$1"
+fi
+
+# Set space as the delimiter
+IFS=' '
+
+for((i=0; i<120; i++))
+do
+  extLB=$(vagrant ssh master -c 'kubectl get svc' 2> /dev/null | grep "tcp-lb-fullnat")
+  read -a strarr <<< "$extLB"
+  len=${#strarr[*]}
+  if [[ $((len)) -lt 6 ]]; then
+    echo "Can't find tcp-lb service"
+    sleep 1
+    continue
+  fi
+  if [[ ${strarr[3]} != *"none"* ]]; then
+    extIP="$(cut -d'-' -f2 <<<${strarr[3]})"
+    break
+  fi
+  echo "No external LB allocated"
+  sleep 1
+done
+
+## Any routing updates ??
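+## kube-loxilb is deployed with --setRoles, so the elected loxilb instance is
+## expected to own the external VIP and answer ARP for it on the
+## 192.168.80.0/24 host-only network (config.sh also enables arp_accept on
+## vboxnet1). The sleep below gives that VIP/ARP and routing state time to
+## converge before probing the service.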
+sleep 30
+
+echo Service IP : $extIP
+echo -e "\nEnd Points List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get endpoints -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nSVC List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get svc' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nPod List"
+echo "******************************************************************************"
+vagrant ssh master -c 'kubectl get pods -A' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nLB List"
+echo "******************************************************************************"
+vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+echo "******************************************************************************"
+echo -e "\nEP List"
+echo "******************************************************************************"
+vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get ep -o wide' 2> /dev/null
+echo "******************************************************************************"
+
+echo -e "\nTEST RESULTS"
+echo "******************************************************************************"
+mode=( "default" "onearm" "fullnat" )
+tcp_port=( 55002 56002 57002 )
+udp_port=( 55003 56003 57003 )
+sctp_port=( 55004 56004 57004 )
+code=0
+for ((i=2;i<=2;i++)); do
+out=$(curl -s --connect-timeout 10 http://$extIP:${tcp_port[i]})
+if [[ ${out} == *"Welcome to nginx"* ]]; then
+  echo -e "K8s-calico-ipvs2 TCP\t(${mode[i]})\t[OK]"
+else
+  echo -e "K8s-calico-ipvs2 TCP\t(${mode[i]})\t[FAILED]"
+  ## Dump some debug info
+  echo "llb1 lb-info"
+  vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+  echo "llb1 route-info"
+  vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+  code=1
+fi
+
+out=$(timeout 5 ../common/udp_client $extIP ${udp_port[i]})
+if [[ ${out} == *"Client"* ]]; then
+  echo -e "K8s-calico-ipvs2 UDP\t(${mode[i]})\t[OK]"
+else
+  echo -e "K8s-calico-ipvs2 UDP\t(${mode[i]})\t[FAILED]"
+  ## Dump some debug info
+  echo "llb1 lb-info"
+  vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide' 2> /dev/null
+  echo "llb1 route-info"
+  vagrant ssh llb1 -c 'sudo docker exec -it loxilb ip route' 2> /dev/null
+  code=1
+fi
+
+done
+exit $code
diff --git a/cicd/k8s-calico-ipvs2/yaml/.kube-loxilb.yml.swp b/cicd/k8s-calico-ipvs2/yaml/.kube-loxilb.yml.swp
new file mode 100644
index 0000000000000000000000000000000000000000..14cc2659afc9c0e372985d79421124ef6d5874ac
GIT binary patch
literal 12288
zcmeI2UuYaf9LKk<)@rSat%W`)WAY%C?A`4(3GwiXX;VreNkeL@RQxlyJGqPw*@;)53ai>Tm(4+`Q#RTLkqh~K^2+uQuP{!#j1nN2=i
zZf1V}zVn;Cglw&JXx|hWD>xRe+brwtkJq!$Ha@$q&evKVkB%h6m%2i)e5obv`KJ?y
zMem3m`in(hEONhAbh+yln`6p$sn1dhqkvJsC}0#Y3K#{90{=q=sApN<*CToS|G)e9
z|6jLS*011O@HsdOUI!<^^PmPE1{0tP3Sbay0qen^w^-J1;0N$E_z0Wz;!j#P2i6k
zEbB+`3J_pB*a9xBx2*TU8{hskhA()04ZVgvYARiiur&
zXO@lyBIH`gY%qxn2eZOws#+?vzp}};d&mecGmJ@99Vrz$-%e$8#3?yLBTk|7-5Dq^
zHHBlCR!4_NoMovkPoo=&$&mU77>e|$Z0+7RF{`RhdAN{KEDsMkRyb6>gOjE)t?kauxMFS}%6@(|p?*~w}{>!?yJ=9ZR6$`vQMhE|)~(V5GFQd!Tn
zh)s$$9_Du}Tj+`)prP06NgiN(XJX%<5qL@ujXCZw(3Z*!+Vd4sZ@99+McNWJe#$+>
zSMjMvccnHq32DG8B)1@)fufr75S!SI519fUQq4uUS5lYFFv*42(Oy+ZDK%6~&U2TI
zyRL{sJ>ADW4y0(GJD)1mS)^J@v%n6~n6`^Kj%N|G(g+a|lN6m;A$t}%jz>0S6E-); zTwNj4V$UMZv+f=5i<&hVB;1`w8uvUAs{Ntg${Y`pDJq*pHyDY8N2nr{bQ2@EBiSM( zb3zgvPq)Fm*ymxrkXc_ebT8ur9n)^g8s`dSLgP&LhERH%=>;L1xto`YnA_bMLTD6K zNarcdGy$L1n4kIxn5G`pG+7Ucqof78w#zL8s@=w9W~`VGX`wnyC5zB@6npJhz3@vN z^ui{Mr6Tl6+FhOPg`1Tf*UPWT(eGpefkZ37oy?*UKZG8H9+jj>Rh)M$+qT<9_KmPw z3_@(u@qS=QgIX%C9YKk0saB>&Qls5|jP8`lZ4a@L)m|xLXfA8iEznpu(9KV__pN}( zXmR>1iKVk_E)i~}+g`=_0{c2ks_rD8%VQU`LdRutBo(@nVQ<^&%rjZbddMbenbgI{ zh>~ocvxUwPTJhmxlU3_U+;{8F`RyN3izW*%BNDb5U6r(aSGqo-d>s3BY03OT(x=(G zI_u0VE1HE~BzUM5`j+)A*W=0+uxwo-ejMQtbq}gdq9qwzd2;EEQWce^I_}=juv4A> z>)fGY?qWBe|1_0Cy(!1~^8E_S+Cz2oGM Ie0n?k2blPMl>h($ literal 0 HcmV?d00001 diff --git a/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml new file mode 100644 index 000000000..276346d91 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/kube-loxilb.yml @@ -0,0 +1,134 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kube-loxilb + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +rules: + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - pods + verbs: + - get + - watch + - list + - patch + - apiGroups: + - "" + resources: + - endpoints + - services + - services/status + verbs: + - get + - watch + - list + - patch + - update + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - get + - watch + - list + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: kube-loxilb +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kube-loxilb +subjects: + - kind: ServiceAccount + name: kube-loxilb + namespace: kube-system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kube-loxilb + namespace: kube-system + labels: + app: loxilb +spec: + replicas: 1 + selector: + matchLabels: + app: loxilb + template: + metadata: + labels: + app: loxilb + spec: + hostNetwork: true + tolerations: + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. 
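+      # (Together with the NoExecute toleration below, this keeps kube-loxilb
+      # schedulable and running even on tainted or pressured nodes.)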
+ - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + priorityClassName: system-node-critical + serviceAccountName: kube-loxilb + terminationGracePeriodSeconds: 0 + containers: + - name: kube-loxilb + image: ghcr.io/loxilb-io/kube-loxilb:latest + imagePullPolicy: Always + command: + - /bin/kube-loxilb + args: + - --loxiURL=http://192.168.80.252:11111,http://192.168.80.253:11111 + - --externalCIDR=192.168.80.5/32 + #- --externalSecondaryCIDRs=124.124.124.1/24,125.125.125.1/24 + #- --monitor + #- --setBGP=64511 + #- --extBGPPeers=50.50.50.1:65101,51.51.51.1:65102 + - --setRoles=0.0.0.0 + #- --monitor + #- --setBGP + #- --setLBMode=1 + #- --config=/opt/loxilb/agent/kube-loxilb.conf + resources: + requests: + cpu: "100m" + memory: "50Mi" + limits: + cpu: "100m" + memory: "50Mi" + securityContext: + privileged: true + capabilities: + add: ["NET_ADMIN", "NET_RAW"] diff --git a/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml b/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml new file mode 100644 index 000000000..31afe601c --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/kubeadm-config.yaml @@ -0,0 +1,69 @@ +apiVersion: kubeadm.k8s.io/v1beta3 +bootstrapTokens: +- groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 192.168.80.250 + bindPort: 6443 +nodeRegistration: + imagePullPolicy: IfNotPresent + name: master + taints: null +--- +apiVersion: kubeadm.k8s.io/v1beta3 +certificatesDir: /etc/kubernetes/pki +kind: ClusterConfiguration +apiServer: + timeoutForControlPlane: 4m0s + certSANs: + - 192.168.80.250 +controlPlaneEndpoint: 192.168.80.250:6443 +clusterName: kubernetes +controllerManager: {} +dns: {} +etcd: + local: + dataDir: /var/lib/etcd +imageRepository: registry.k8s.io +kubernetesVersion: v1.27.5 +networking: + dnsDomain: cluster.local + podSubnet: 172.16.1.0/16 + serviceSubnet: 172.17.1.0/18 +scheduler: {} +--- +apiVersion: kubeproxy.config.k8s.io/v1alpha1 +bindAddress: 0.0.0.0 +clientConnection: + acceptContentTypes: "" + burst: 10 + contentType: application/vnd.kubernetes.protobuf + kubeconfig: /var/lib/kube-proxy/kubeconfig.conf + qps: 5 +clusterCIDR: "" +configSyncPeriod: 15m0s +#featureGates: "SupportIPVSProxyMode=true" +mode: ipvs +enableProfiling: false +healthzBindAddress: 0.0.0.0:10256 +hostnameOverride: "" +iptables: + masqueradeAll: false + masqueradeBit: 14 + minSyncPeriod: 0s + syncPeriod: 30s +ipvs: + excludeCIDRs: null + minSyncPeriod: 0s + scheduler: "" + syncPeriod: 30s +kind: KubeProxyConfiguration +metricsBindAddress: 127.0.0.1:10249 +nodePortAddresses: null +oomScoreAdj: -999 +portRange: "" diff --git a/cicd/k8s-calico-ipvs2/yaml/sctp.yml b/cicd/k8s-calico-ipvs2/yaml/sctp.yml new file mode 100644 index 000000000..c9a7d4afd --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/sctp.yml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-default + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "default" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-default-test + ports: + - port: 55004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-default-test + labels: + what: sctp-default-test +spec: + containers: + - name: sctp-default-test + image: ghcr.io/loxilb-io/alpine-socat:latest + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork 
system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml b/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml new file mode 100644 index 000000000..6b43037a5 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/sctp_fullnat.yml @@ -0,0 +1,39 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-fullnat + annotations: + loxilb.io/num-secondary-networks: "2" + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-fullnat-test + ports: + - port: 57004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-fullnat-test + labels: + what: sctp-fullnat-test +spec: + containers: + - name: sctp-fullnat-test + image: loxilbio/sctp-darn:latest + imagePullPolicy: Always + #command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 6 ; done"] + command: ["sctp_darn","-H", "0.0.0.0","-P", "9999", "-l"] + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-calico-ipvs2/yaml/sctp_onearm.yml b/cicd/k8s-calico-ipvs2/yaml/sctp_onearm.yml new file mode 100644 index 000000000..b4b736962 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/sctp_onearm.yml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: Service +metadata: + name: sctp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: sctp-onearm-test + ports: + - port: 56004 + protocol: SCTP + targetPort: 9999 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: sctp-onearm-test + labels: + what: sctp-onearm-test +spec: + containers: + - name: sctp-onearm-test + image: ghcr.io/loxilb-io/alpine-socat:latest + command: [ "sh", "-c"] + args: + - while true; do + socat -v -T2 sctp-l:9999,reuseaddr,fork system:"echo 'server1'; cat"; + sleep 20; + done; + ports: + - containerPort: 9999 + env: + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP diff --git a/cicd/k8s-calico-ipvs2/yaml/settings.yaml b/cicd/k8s-calico-ipvs2/yaml/settings.yaml new file mode 100644 index 000000000..e5b02a60b --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/settings.yaml @@ -0,0 +1,44 @@ +--- +# cluster_name is used to group the nodes in a folder within VirtualBox: +cluster_name: Kubernetes Cluster +# Uncomment to set environment variables for services such as crio and kubelet. +# For example, configure the cluster to pull images via a proxy. +# environment: | +# HTTP_PROXY=http://my-proxy:8000 +# HTTPS_PROXY=http://my-proxy:8000 +# NO_PROXY=127.0.0.1,localhost,master-node,node01,node02,node03 +# All IPs/CIDRs should be private and allowed in /etc/vbox/networks.conf. +network: + iloxilb_ip: 192.168.80.253 + oloxilb_ip: 192.168.90.253 + # Worker IPs are simply incremented from the control IP. + control_ip: 192.168.80.250 + dns_servers: + - 8.8.8.8 + - 1.1.1.1 + pod_cidr: 172.16.1.0/16 + service_cidr: 172.17.1.0/18 +nodes: + control: + cpu: 2 + memory: 4096 + workers: + count: 2 + cpu: 1 + memory: 2048 +# Mount additional shared folders from the host into each virtual machine. +# Note that the project directory is automatically mounted at /vagrant. 
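+# Each shared_folders entry maps a host_path on the host to a vm_path inside
+# the guests, as in the commented-out example below.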
+# shared_folders: +# - host_path: ../images +# vm_path: /vagrant/images +software: + loxilb: + box: + name: sysnet4admin/Ubuntu-k8s + version: 0.7.1 + cluster: + box: bento/ubuntu-22.04 + calico: 3.26.0 + # To skip the dashboard installation, set its version to an empty value or comment it out: + kubernetes: 1.27.1-00 + os: xUbuntu_22.04 diff --git a/cicd/k8s-calico-ipvs2/yaml/tcp.yml b/cicd/k8s-calico-ipvs2/yaml/tcp.yml new file mode 100644 index 000000000..8c8983403 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/tcp.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-default + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "default" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-default-test + ports: + - port: 55002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-default-test + labels: + what: tcp-default-test +spec: + containers: + - name: tcp-default-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-calico-ipvs2/yaml/tcp_fullnat.yml b/cicd/k8s-calico-ipvs2/yaml/tcp_fullnat.yml new file mode 100644 index 000000000..3303ac35e --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/tcp_fullnat.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-fullnat-test + ports: + - port: 57002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-fullnat-test + labels: + what: tcp-fullnat-test +spec: + containers: + - name: tcp-fullnat-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-calico-ipvs2/yaml/tcp_onearm.yml b/cicd/k8s-calico-ipvs2/yaml/tcp_onearm.yml new file mode 100644 index 000000000..b3d345483 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/tcp_onearm.yml @@ -0,0 +1,29 @@ +apiVersion: v1 +kind: Service +metadata: + name: tcp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + externalTrafficPolicy: Local + loadBalancerClass: loxilb.io/loxilb + selector: + what: tcp-onearm-test + ports: + - port: 56002 + targetPort: 80 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: tcp-onearm-test + labels: + what: tcp-onearm-test +spec: + containers: + - name: tcp-onearm-test + image: ghcr.io/loxilb-io/nginx:stable + ports: + - containerPort: 80 diff --git a/cicd/k8s-calico-ipvs2/yaml/udp.yml b/cicd/k8s-calico-ipvs2/yaml/udp.yml new file mode 100644 index 000000000..ac6ef997d --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/udp.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-default + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "default" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-default-test + ports: + - port: 55003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-default-test + labels: + what: udp-default-test +spec: + containers: + - name: udp-default-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-calico-ipvs2/yaml/udp_fullnat.yml b/cicd/k8s-calico-ipvs2/yaml/udp_fullnat.yml new file mode 100644 index 000000000..67b729019 --- /dev/null +++ 
b/cicd/k8s-calico-ipvs2/yaml/udp_fullnat.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-fullnat + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "fullnat" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-fullnat-test + ports: + - port: 57003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-fullnat-test + labels: + what: udp-fullnat-test +spec: + containers: + - name: udp-fullnat-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 diff --git a/cicd/k8s-calico-ipvs2/yaml/udp_onearm.yml b/cicd/k8s-calico-ipvs2/yaml/udp_onearm.yml new file mode 100644 index 000000000..833187e73 --- /dev/null +++ b/cicd/k8s-calico-ipvs2/yaml/udp_onearm.yml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + name: udp-lb-onearm + annotations: + loxilb.io/liveness: "yes" + loxilb.io/lbmode: "onearm" +spec: + loadBalancerClass: loxilb.io/loxilb + externalTrafficPolicy: Local + selector: + what: udp-onearm-test + ports: + - port: 56003 + protocol: UDP + targetPort: 33333 + type: LoadBalancer +--- +apiVersion: v1 +kind: Pod +metadata: + name: udp-onearm-test + labels: + what: udp-onearm-test +spec: + containers: + - name: udp-onearm-test + image: ghcr.io/loxilb-io/udp-echo:latest + ports: + - containerPort: 33333 From 80ab51405357b18368b5f6a67b86fc40fbee5603 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Sun, 17 Sep 2023 23:23:20 +0900 Subject: [PATCH 2/4] cicd:extended tests for k8s with calico/ipvs --- cicd/k8s-calico-ipvs2/configs/config | 19 ------------------- cicd/k8s-calico-ipvs2/configs/join.sh | 1 - 2 files changed, 20 deletions(-) delete mode 100644 cicd/k8s-calico-ipvs2/configs/config delete mode 100755 cicd/k8s-calico-ipvs2/configs/join.sh diff --git a/cicd/k8s-calico-ipvs2/configs/config b/cicd/k8s-calico-ipvs2/configs/config deleted file mode 100644 index 62c73aab1..000000000 --- a/cicd/k8s-calico-ipvs2/configs/config +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -clusters: -- cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJek1Ea3hOekUwTURNek4xb1hEVE16TURreE5ERTBNRE16TjFvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBS28wCmJDS0puTFRWOFVkbVNDSXczZU5PY0VJYXpteVFsYnZaNVR2OXRpMFNaeDFlMnJUZ1NBc1F3M2NiaWZIU1c1YWgKU2luRVJlS1dWemlvMkZKRWpTMDh5bEVDdGRMbE1JUXFaSlkvK2lGeldKWUhJZm1MRlJJdGxCdXBNNW1TVEZkMQpQbTlkVDdNNTJYUnhMYnRRY0ZDRjd2MXFqZURwM1RJT010Qko1b1BCT2xrVzF6SVduS2pEUUUxUkx2MGZqZmpuCm0rcW5nc0lvR0NjdFJzdU04cFcrSE83MW5hek9ITFVDK3RuU0Y3a0dEWnBkNlNuL1NIdFJYOVFDSklIMkZLVUMKODZONmhVMFovQTlmZEh1Q3ZMeXNuM0FyNzgrSDNyR0QzUTM1d0hIS0ZrdHZKdlExdUdCb3NVTllkM0drZStrMApBcnMraUtOREhPVmJTTEdpM1hVQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZKYXV5a3pFV05WSlpCWjRRc0plaE8va3dmM2lNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBRCs2bWM2VW12akE3MzZOTGp0bQp3UHp4NlhnWjhvT01kMDJndDZMMWpBNXBqY21ERFc3MGgzamJrSVl6bXRLV29ZZGJzMllUTE9ZTWFnZTFFQVdvCnZuS1d4Q08wQ293UWo4ZmJLWWlUNTlidnFGZGdtT1VacTR1YXpqZXUvMWJHcXFHb0pzcW4ySjcvY0s0d3FRdm4KQ1M1cGFzNXpnVlhzY2tLK1I5SXRNak5hYlV0Qjl5UzUzS2pOZzV4UnV3eTBsVlZpaVo2d29DNEFQVGtqZStxLwpWUC8wdVRpZUhrajVaUnZMTGxlUm1zTnVyUC9vK0pkNDB5bDdOWjRmWjZzdDRVSGl4TkpZRld4VHZ0aDUzcjMwClArMWRxSlYxdXM3MXV0TmZvWW9PTGxRSUNLcTA5cmRNTm9tYTFoWTBkNStHWjhhYkxNelVoc0pxbXU2aUhPcXgKbDQ0PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - server: https://192.168.80.250:6443 - name: kubernetes -contexts: -- context: - cluster: kubernetes - user: kubernetes-admin - name: kubernetes-admin@kubernetes -current-context: kubernetes-admin@kubernetes -kind: Config -preferences: {} -users: -- name: kubernetes-admin - user: - client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURJVENDQWdtZ0F3SUJBZ0lJYVgxTFE5RzE5Sll3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TXpBNU1UY3hOREF6TXpkYUZ3MHlOREE1TVRZeE5EQXpOREJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQXM5VzNkSWtPdHVEbmRNenYKWFhiQkhLTXFjclQzYlhMQ0tCakE2Z2d5anJRaVh2TDNsTThSQXliYXk5ZUpjUEVRcHkwKzFwM1JFN2UxU2lYNgpKaGFvN0RaYUZRMm82TjQwWlNsMDl0d29LM0xXNU9pTG1udGVveWpSdStRMmF4RnpSWllZNVorOVY5ZWwxd3dKClYraUUrTGp6LzJJZ2hhSm92ejlHRU8rRElGYzZ1TTlZOWRTdlAvY0tRMXFncnR6RUppUDZuNzVRNGRKMVhCRG0Kb2pzZXU2aXBIdC90TXN1NEpKZW1EL0l6eHRlWHE1SXd5QmRNRkxheEcxb2xONGllMUVYY2NKRnBldTJ5amRzeQpvWE5sd1pBNkxtQTVJZkZLS1djRjFXMVEwT2ltU0hCNld6eldYUkpOMWl0SW9lc2NzaW5qUDhDRlNVWmZXSlBjCm9JL25Od0lEQVFBQm8xWXdWREFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RBWURWUjBUQVFIL0JBSXdBREFmQmdOVkhTTUVHREFXZ0JTV3JzcE14RmpWU1dRV2VFTENYb1R2NU1IOQo0akFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBR3M3ajY2R3RBcGU4OVB1WDhNWU9sbVZSb0FJTmc4blp6MzNhCmM3c25lR3J4Y05Ma1FTa3h3TzNET1RJMjdPRFJuT0RRVXZZOEdEcG1qQjZNTUxIbDRLQlZ0dUUvRFlUd01IemcKZXRESmtnU3B4Rk1RTTN0ZzQzRlNiQ0FhVTFYaDUrMWgvZTBJY0Jqak94VHpQdnN0Ym1jemlBZWkrOTVhWTNOTgpMYkVQV3NoNmlCN2NuekZIcGhEWEhCaVcxMDVzbzk3VUFRc3Z3ZnoxREdJb2dVKzgrOXBVWWFYNkJqV0tGSEMrClpkZWhxMlZ3amFCaklvekhxMXg2dGVFV0N1RFhEc0tUeXIxUldBSXIxblEySWRaNWQ0bmZrRitrY3BjdHdyeUgKbFlMVTlHZVZLU1YyUC9QcTZhakQ5Vjg1MnpYbXBZckNQZmY3RUVFb0tPSnhIM2RQQ0E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== - client-key-data: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBczlXM2RJa090dURuZE16dlhYYkJIS01xY3JUM2JYTENLQmpBNmdneWpyUWlYdkwzCmxNOFJBeWJheTllSmNQRVFweTArMXAzUkU3ZTFTaVg2SmhhbzdEWmFGUTJvNk40MFpTbDA5dHdvSzNMVzVPaUwKbW50ZW95alJ1K1EyYXhGelJaWVk1Wis5VjllbDF3d0pWK2lFK0xqei8ySWdoYUpvdno5R0VPK0RJRmM2dU05WQo5ZFN2UC9jS1ExcWdydHpFSmlQNm43NVE0ZEoxWEJEbW9qc2V1NmlwSHQvdE1zdTRKSmVtRC9Jenh0ZVhxNUl3CnlCZE1GTGF4RzFvbE40aWUxRVhjY0pGcGV1MnlqZHN5b1hObHdaQTZMbUE1SWZGS0tXY0YxVzFRME9pbVNIQjYKV3p6V1hSSk4xaXRJb2VzY3NpbmpQOENGU1VaZldKUGNvSS9uTndJREFRQUJBb0lCQUFkczRhQk1NSEZZby9YcwpxRTJCbzhPYzhSbS9ZeC8veHdpRXpTSmNxRW5BSWxNdFVRK3g1US91UU1aOTVjem1CenYvNEVKWVRkTzBtQ1d4Cm43eUczdjZ4MTlKYXVBaTFkQkJoUDlVWmpnY05QTlpDeTlEYkd6TTljanVCMXNWZytSTjhlUmF5RzgxT3VDaEMKOVBmT1AvOU5FN1lOeFMzL1lMZlUwdjNNQmV3RjNFcW1vQjQ0Vkl0bmxNVWNIVmQ0TDljNWpYVWpsRTltTGlJKwpka2FYeVdsbE9GUWVOWEtDTGxZYjh5U1dIMXNENXMxMDMyRzFwZDlXWTB5MnpUN2YxcUU5R0ppMHprL2VxM0tUCjlvR3Irb1VIQS9sWnRENU1LMDdFc21zUmNCVUtLcjFmZ0xUWEVRQUZpbGlsSkRCR3RPWUc1VStaTjhCOGtJa3QKR1BpSWlpRUNnWUVBM1ZJR2dYVEJuUjdQUVk2MXYrYzJsdXFNRGI4MXlJc3RmUDhaTDZJYnVjNVhCemhIVFV1RApEMEtJajhBU1JIRWN3c3BhdWN2STUxN0thS3RaYk9vYVNjelZub2J0QjJrbno5QnhkMSttUGlDVWxrZ0pjWWlJCnhBSUt3MVBVQ2JoekV6bmtNeDVaNUlSbHBYc3M1NEpQdVpEN2dVVXJGLzV6R0tkSitUcDNNdmtDZ1lFQTBBT04KVzM0Qk5udVY0eUVxbXpDMGJzRTdodFB5aXJTaWpmekxBeWJuazcwek1pUUowQlY4YkU4RUx3dzFGYUwvVjY4YgpZZTJqeENzRTBlTnBLczM5bkVKTzlPV0pPdGJXQ0hhMFhkS0FJT2xPQ21xZ2E2UzR5VHY3MXc3RExGOHpMdEhKCjgvUVAvZHBUZ1Z4ZDJlVGVFU0gxSFhBNHhxQ0IwY2twNWVTNVI2OENnWUFLOUlCMTcvMlZkV3owNWNlQlZFU1QKZTlDcTdZZ1FBVE9qT0UxYTJqOWRGeVdmcWQxOXFmczJ5aWhRVlVXU05iY2l2VGw0Y09uazZnQkM1aGhyRjd0dApZOEkvTXBhUkExZUJKZVEzOEhnZUl5YTN1YUVlQ1pBYmc2NWN0TTJPSFo4MG1Hei9Tc1ZRVm0xaG41TTljbW5YCjJhRlphTDhWVlBjRmhXSlgvOWplb1FLQmdCVW9aMVpwQ0tLTUM0RGd2TDZ3UTRvSWFZK2JPMDVVZFR4UC9DNUQKNU1Zb05tN2hMNFpVeVNVVEhHL1VwQW1oWEt1RWlocDUyM0IybDdUcGxxYmQ4Ty9WNXNZT0d0azdST1h6ZlFFVwpDaVpPMUx0R2dTNnh3SUhuY2dRNzRpby9QS3VFZHpBeklQVkhJYnRFczk4WDRtYUh0N0RZMjAySXdldWV0SDVICnQzak5Bb0dBTVNlQ2ZzamY2bkNBNng1M29GeXNyc1ZEUk9sS04zSnlDeGt1dk1RVGhYTzlNMWNMdU9kUDhPVWMKMjdIMHE0SkZzZHFCWWRWaUFDS3kxUXkwU0oxV2dEZTRiTU1IS3l0SlZhMU9vYlFjb2hvbnY1UWJtSitURDI3QgpqMVRTQzV1RWYzMm9saE5pVUUwajZRRWxmNTJBeTNoK0JuQXBPRFNwQ01SRnJ5dDk3WFE9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== diff --git a/cicd/k8s-calico-ipvs2/configs/join.sh b/cicd/k8s-calico-ipvs2/configs/join.sh deleted file mode 100755 index 123e57e98..000000000 --- a/cicd/k8s-calico-ipvs2/configs/join.sh +++ /dev/null @@ -1 +0,0 @@ -kubeadm join 192.168.80.250:6443 --token hd7r6h.lwlhrhraltch29am --discovery-token-ca-cert-hash sha256:069ecfbe37f380fa0ca3a0a3a2a1a1c4d42e9b5b38e6757aec0bf2cf3366bb70 From 7108cf122c49ecb4f1ce7d528e667f8238d51913 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Tue, 19 Sep 2023 09:21:10 +0900 Subject: [PATCH 3/4] Auto advertise VIPs - Dont operate in NOT_DEFINED state --- loxinet/rules.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/loxinet/rules.go b/loxinet/rules.go index 9013e497f..86fed9afe 100644 --- a/loxinet/rules.go +++ b/loxinet/rules.go @@ -2312,7 +2312,7 @@ func (R *RuleH) AdvRuleVIPIfL2(IP net.IP) error { } } - } else { + } else if ciState != "NOT_DEFINED" { if IsIPHostAddr(IP.String()) { if loxinlp.DelAddrNoHook(IP.String()+"/32", "lo") != 0 { tk.LogIt(tk.LogError, "nat lb-rule vip %s:%s delete failed\n", IP.String(), "lo") From 3f0eb5d43f0f6dc5efc3b0a671703c471fcf4454 Mon Sep 17 00:00:00 2001 From: Trekkie Coder Date: Tue, 19 Sep 2023 09:32:17 +0900 Subject: [PATCH 4/4] Updated to latest submodule --- loxilb-ebpf | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/loxilb-ebpf b/loxilb-ebpf index fc591b577..cc86c3ad5 160000 --- a/loxilb-ebpf +++ b/loxilb-ebpf @@ -1 +1 @@ -Subproject commit fc591b577970276f13cb9a13cf989257c78eb397 +Subproject commit cc86c3ad574c6745be19fd89f5a02716edbfa714
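
For reference, the scenario added by this series can be exercised by hand from
the new cicd/k8s-calico-ipvs2 directory (a rough sketch; it assumes Vagrant
with VirtualBox on the host and uses the VM/container names defined above):

    ./config.sh       # destroy stale VirtualBox VMs, bring up llb1/llb2 and master/worker1/worker2, apply the fullnat services
    ./validation.sh   # wait for the external IP and probe the TCP/UDP services through the loxilb VIP
    vagrant ssh llb1 -c 'sudo docker exec -it loxilb loxicmd get lb -o wide'   # optional: inspect loxilb LB state
    ./rmconfig.sh     # tear the whole topology down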