Skip to content

Commit

Permalink
Merge pull request loxilb-io#857 from TrekkieCoder/main
Browse files Browse the repository at this point in the history
loxilb-io gh-87: added initial CICD for service sharding
  • Loading branch information
TrekkieCoder authored Oct 30, 2024
2 parents cb901ec + d9878cd commit 70c7161
Show file tree
Hide file tree
Showing 20 changed files with 692 additions and 0 deletions.
34 changes: 34 additions & 0 deletions cicd/k3s-sharding/EPconfig.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
{
"Attr":[
{
"hostName":"192.168.80.10",
"name":"192.168.80.10_tcp_6443",
"inactiveReTries":2,
"probeType":"tcp",
"probeReq":"",
"probeResp":"",
"probeDuration":10,
"probePort":6443
},
{
"hostName":"192.168.80.11",
"name":"192.168.80.11_tcp_6443",
"inactiveReTries":2,
"probeType":"tcp",
"probeReq":"",
"probeResp":"",
"probeDuration":10,
"probePort":6443
},
{
"hostName":"192.168.80.12",
"name":"192.168.80.12_tcp_6443",
"inactiveReTries":2,
"probeType":"tcp",
"probeReq":"",
"probeResp":"",
"probeDuration":10,
"probePort":6443
}
]
}
72 changes: 72 additions & 0 deletions cicd/k3s-sharding/Vagrantfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Topology for the k3s service-sharding CICD test:
#   host     - client/traffic generator (192.168.80.9 / 192.168.90.9)
#   master1-3 - k3s server nodes (192.168.80.10-12 / 192.168.90.10-12)
#   worker1-N - k3s agents (192.168.80.101+), count set via WORKERS env var.
workers = (ENV['WORKERS'] || "2").to_i
box_name = (ENV['VAGRANT_BOX'] || "sysnet4admin/Ubuntu-k8s")
box_version = "0.7.1"

Vagrant.configure("2") do |config|
  config.vm.box = box_name
  config.vm.box_version = box_version

  # Guest-additions auto-update is disabled so the pinned box image is
  # not modified during bring-up.
  if Vagrant.has_plugin?("vagrant-vbguest")
    config.vbguest.auto_update = false
  end

  # Client host: one leg on each private network, provisioned by host.sh.
  config.vm.define "host" do |host|
    host.vm.hostname = 'host1'
    host.vm.network :private_network, ip: "192.168.80.9", :netmask => "255.255.255.0"
    host.vm.network :private_network, ip: "192.168.90.9", :netmask => "255.255.255.0"
    host.vm.provision :shell, :path => "host.sh"
    host.vm.provider :virtualbox do |vbox|
      vbox.customize ["modifyvm", :id, "--memory", 2048]
      vbox.customize ["modifyvm", :id, "--cpus", 1]
    end
  end

  # Master nodes master1..master3: masterN gets 192.168.90.(9+N) and
  # 192.168.80.(9+N) and runs masterN.sh.  A loop replaces the three
  # previously duplicated, near-identical blocks.
  (1..3).each do |n|
    config.vm.define "master#{n}" do |master|
      master.vm.hostname = "master#{n}"
      master.vm.network :private_network, ip: "192.168.90.#{9 + n}", :netmask => "255.255.255.0"
      master.vm.network :private_network, ip: "192.168.80.#{9 + n}", :netmask => "255.255.255.0"
      master.vm.provision :shell, :path => "master#{n}.sh"
      master.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 8192]
        vbox.customize ["modifyvm", :id, "--cpus", 4]
      end
    end
  end

  # Worker nodes workerN at 192.168.80.(100+N), all provisioned by worker.sh.
  (1..workers).each do |node_number|
    config.vm.define "worker#{node_number}" do |worker|
      worker.vm.hostname = "worker#{node_number}"
      ip = node_number + 100
      worker.vm.network :private_network, ip: "192.168.80.#{ip}", :netmask => "255.255.255.0"
      worker.vm.provision :shell, :path => "worker.sh"
      worker.vm.provider :virtualbox do |vbox|
        vbox.customize ["modifyvm", :id, "--memory", 4096]
        vbox.customize ["modifyvm", :id, "--cpus", 2]
      end
    end
  end
end
7 changes: 7 additions & 0 deletions cicd/k3s-sharding/config.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/bin/bash
# Bring up the k3s-sharding test topology from a clean slate and deploy
# the one-arm test services onto the cluster.

# Destroy any leftover VirtualBox VMs from previous runs.
# -r (--no-run-if-empty): without it, an empty ID list would run a bare
# "vagrant destroy -f", which would destroy the current directory's machines.
vagrant global-status | grep -i virtualbox | cut -f 1 -d ' ' | xargs -r -L 1 vagrant destroy -f
vagrant up
#sudo ip route add 123.123.123.1 via 192.168.90.10 || true
# Deploy the TCP/UDP/SCTP one-arm test services via master1.
vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/tcp-onearm-ds.yml'
vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/udp-onearm-ds.yml'
vagrant ssh master1 -c 'sudo kubectl create -f /vagrant/sctp-onearm-ds.yml'
6 changes: 6 additions & 0 deletions cicd/k3s-sharding/host.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#!/bin/bash
# Provision the client host VM: install SCTP test tooling and route the
# service-VIP range (123.123.123.0/24) via the master-side network.
sudo apt-get install -y lksctp-tools
# Tolerate an already-existing route so re-provisioning stays idempotent.
sudo ip route add 123.123.123.0/24 via 192.168.90.10 || true
# Accept gratuitous ARP on the private interfaces so LB VIP announcements
# update the neighbor table promptly.
sysctl net.ipv4.conf.eth1.arp_accept=1
sysctl net.ipv4.conf.eth2.arp_accept=1
sysctl net.ipv4.conf.default.arp_accept=1
echo "Host is up"
59 changes: 59 additions & 0 deletions cicd/k3s-sharding/host_validation.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
#!/bin/bash
# Validate the sharded k3s services (TCP/UDP/SCTP) from the client host.
# The VIP files under /vagrant are written by provisioning scripts not
# visible in this change — presumably one VIP per protocol; verify there.
extIP=$(cat /vagrant/extIP)
extIP1=$(cat /vagrant/extIP1)
extIP2=$(cat /vagrant/extIP2)

mode="onearm"
tcp_port=55001
udp_port=55002
sctp_port=55003

# code is the script's exit status: 0 until any probe fails.
code=0
echo TCP Service IP: $extIP

# Diagnostic only: show how the TCP VIP is currently routed.
ip route list match $extIP | grep $extIP -A 2

echo -e "\n*********************************************"
echo "Testing Service"
echo "*********************************************"
# Run the full TCP/UDP/SCTP probe cycle 20 times; a single failure in any
# iteration marks the whole run failed.
for((i=0;i<20;i++))
do

# TCP: expect the default nginx landing page through the VIP.
out=$(curl -s --connect-timeout 10 http://$extIP:$tcp_port)
if [[ ${out} == *"Welcome to nginx"* ]]; then
echo -e "K3s-sharding TCP\t($mode)\t[OK]"
else
echo -e "K3s-sharding TCP\t($mode)\t[FAILED]"
code=1
fi

echo UDP Service IP: $extIP1

# UDP: helper binary must echo something containing "Client" within 5s.
out=$(timeout 5 /vagrant/udp_client $extIP1 $udp_port)
if [[ ${out} == *"Client"* ]]; then
echo -e "K3s-sharding UDP\t($mode)\t[OK]"
else
echo -e "K3s-sharding UDP\t($mode)\t[FAILED]"
code=1
fi

echo SCTP Service IP: $extIP2

# SCTP: drive an association from this host's cluster-side address
# (192.168.80.9) to the VIP, feeding stdin from /vagrant/input.
sctp_darn -H 192.168.80.9 -h $extIP2 -p $sctp_port -s < /vagrant/input > output
#sleep 2
# NOTE(review): the expected peer address 192.168.80.202 is tied to the
# LB's IP allocation (cidrPools=defaultPool=192.168.80.200/24 in
# kube-loxilb.yml) — confirm this stays stable across runs.
exp="New connection, peer addresses
192.168.80.202:55003"

res=`cat output | grep -A 1 "New connection, peer addresses"`
sudo rm -rf output
if [[ "$res" == "$exp" ]]; then
#echo $res
echo -e "K3s-sharding SCTP\t($mode)\t[OK]"
else
echo -e "K3s-sharding SCTP\t($mode)\t[FAILED]"
code=1
fi


done
exit $code
6 changes: 6 additions & 0 deletions cicd/k3s-sharding/input
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@






148 changes: 148 additions & 0 deletions cicd/k3s-sharding/kube-loxilb.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
---
# ServiceAccount the kube-loxilb controller runs as.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-loxilb
  namespace: kube-system
---
# Cluster-wide read/patch access the controller needs to track nodes,
# pods, services and endpoint slices, plus token/access reviews.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-loxilb
rules:
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
      - watch
      - list
      - patch
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
      - watch
      - list
      - patch
  - apiGroups:
      - ""
    resources:
      - endpoints
      - services
      - namespaces
      - services/status
    verbs:
      - get
      - watch
      - list
      - patch
      - update
  - apiGroups:
      - discovery.k8s.io
    resources:
      - endpointslices
    verbs:
      - get
      - watch
      - list
  - apiGroups:
      - authentication.k8s.io
    resources:
      - tokenreviews
    verbs:
      - create
  - apiGroups:
      - authorization.k8s.io
    resources:
      - subjectaccessreviews
    verbs:
      - create
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kube-loxilb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kube-loxilb
subjects:
  - kind: ServiceAccount
    name: kube-loxilb
    namespace: kube-system
---
# Single-replica controller deployment, pinned to control-plane nodes.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-loxilb
  namespace: kube-system
  labels:
    app: loxilb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: loxilb
  template:
    metadata:
      labels:
        app: loxilb
    spec:
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      tolerations:
        - effect: NoSchedule
          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - key: "node-role.kubernetes.io/master"
          operator: Exists
        - key: "node-role.kubernetes.io/control-plane"
          operator: Exists
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: "node-role.kubernetes.io/master"
                    operator: Exists
                  - key: "node-role.kubernetes.io/control-plane"
                    operator: Exists
      priorityClassName: system-node-critical
      serviceAccountName: kube-loxilb
      terminationGracePeriodSeconds: 0
      containers:
        - name: kube-loxilb
          image: ghcr.io/loxilb-io/kube-loxilb:latest
          imagePullPolicy: Always
          command:
            - /bin/kube-loxilb
          args:
            #- --loxiURL=http://192.168.80.10:11111
            - --cidrPools=defaultPool=192.168.80.200/24
            #- --setBGP=64512
            - --setRoles=0.0.0.0
            - --setUniqueIP
            # Three sharded zone instances — matches the three master nodes.
            - --numZoneInstances=3
            #- --monitor
            #- --setBGP
            #- --setLBMode=1
            #- --config=/opt/loxilb/agent/kube-loxilb.conf
          resources:
            requests:
              cpu: "100m"
              memory: "50Mi"
            limits:
              cpu: "100m"
              memory: "50Mi"
          securityContext:
            privileged: true
            capabilities:
              add: ["NET_ADMIN", "NET_RAW"]
41 changes: 41 additions & 0 deletions cicd/k3s-sharding/lbconfig.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
{
"lbAttr":[
{
"serviceArguments":{
"externalIP":"192.168.80.80",
"port":6443,
"protocol":"tcp",
"sel":0,
"mode":2,
"BGP":false,
"Monitor":true,
"inactiveTimeOut":240,
"block":0
},
"secondaryIPs":null,
"endpoints":[
{
"endpointIP":"192.168.80.10",
"targetPort":6443,
"weight":1,
"state":"active",
"counter":""
},
{
"endpointIP":"192.168.80.11",
"targetPort":6443,
"weight":1,
"state":"active",
"counter":""
},
{
"endpointIP":"192.168.80.12",
"targetPort":6443,
"weight":1,
"state":"active",
"counter":""
}
]
}
]
}
Loading

0 comments on commit 70c7161

Please sign in to comment.