diff --git a/.drone.yml b/.drone.yml
index 449ba9a338d5..e70cb33582f7 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -6,6 +6,11 @@ platform:
   os: linux
   arch: amd64
 
+trigger:
+  event:
+    exclude:
+    - cron
+
 steps:
 - name: build
   image: rancher/dapper:v0.5.0
@@ -128,6 +133,48 @@ volumes:
   host:
     path: /var/run/docker.sock
 
+---
+kind: pipeline
+name: conformance
+
+platform:
+  os: linux
+  arch: amd64
+
+trigger:
+  event:
+  - cron
+  cron:
+  - nightly
+
+steps:
+- name: build
+  image: rancher/dapper:v0.5.0
+  commands:
+  - dapper ci
+  - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
+  volumes:
+  - name: docker
+    path: /var/run/docker.sock
+
+- name: test
+  image: rancher/dapper:v0.5.0
+  environment:
+    ENABLE_REGISTRY: 'true'
+  commands:
+  - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
+  - >
+    docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e SONOBUOY_VERSION -e ENABLE_REGISTRY
+    -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
+  volumes:
+  - name: docker
+    path: /var/run/docker.sock
+
+volumes:
+- name: docker
+  host:
+    path: /var/run/docker.sock
+
 ---
 kind: pipeline
 name: arm64
@@ -136,6 +183,11 @@ platform:
   os: linux
   arch: arm64
 
+trigger:
+  event:
+    exclude:
+    - cron
+
 steps:
 - name: build
   image: rancher/dapper:v0.5.0
@@ -222,6 +274,11 @@ platform:
   os: linux
   arch: arm
 
+trigger:
+  event:
+    exclude:
+    - cron
+
 steps:
 - name: build
   image: rancher/dapper:v0.5.0
@@ -312,6 +369,11 @@ platform:
 node:
   arch: s390x
 
+trigger:
+  event:
+    exclude:
+    - cron
+
 clone:
   disable: true
 
@@ -412,6 +474,11 @@ platform:
   os: linux
   arch: amd64
 
+trigger:
+  event:
+    exclude:
+    - cron
+
 steps:
 - name: validate_go_mods
   image: rancher/dapper:v0.5.0
@@ -459,7 +526,10 @@ trigger:
   - refs/head/master
   - refs/tags/*
   event:
-  - tag
+    include:
+    - tag
+    exclude:
+    - cron
 
 depends_on:
 - amd64
@@ -499,3 +569,76 @@ trigger:
 
 depends_on:
 - manifest
+
+---
+kind: pipeline
+name: e2e
+type: docker
+
+platform:
+  os: linux
+  arch: amd64
+
+steps:
+- name: build-e2e-image
+  image: rancher/dapper:v0.5.0
+  commands:
+  - DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
+  - SKIP_VALIDATE=true SKIP_AIRGAP=true dapper ci
+  - cp dist/artifacts/* /tmp/artifacts/
+  volumes:
+  - name: cache
+    path: /tmp/artifacts
+  - name: docker
+    path: /var/run/docker.sock
+
+- name: test-e2e
+  image: test-e2e
+  pull: never
+  resources:
+    cpu: 6000
+    memory: 10Gi
+  environment:
+    E2E_REGISTRY: 'true'
+  commands:
+  - mkdir -p dist/artifacts
+  - cp /tmp/artifacts/* dist/artifacts/
+  - docker stop registry && docker rm registry
+  # Cleanup any VMs running, happens if a previous test panics
+  - |
+    VMS=$(virsh list --name | grep '_server-\|_agent-' || true)
+    if [ -n "$VMS" ]; then
+      for vm in $VMS
+      do
+        virsh destroy $vm
+        virsh undefine $vm --remove-all-storage
+      done
+    fi
+  - docker run -d -p 5000:5000 -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io --name registry registry:2
+  - cd tests/e2e/validatecluster
+  - vagrant destroy -f
+  - go test -v -timeout=45m ./validatecluster_test.go -ci -local
+  - cd ../secretsencryption
+  - vagrant destroy -f
+  - go test -v -timeout=30m ./secretsencryption_test.go -ci -local
+  - cd ../upgradecluster
+  - E2E_RELEASE_CHANNEL="v1.25" go test -v -timeout=45m ./upgradecluster_test.go -ci -local
+  - docker stop registry && docker rm registry
+
+  volumes:
+  - name: libvirt
+    path: /var/run/libvirt/
+  - name: docker
+    path: /var/run/docker.sock
+  - name: cache
+    path: /tmp/artifacts
+
+volumes:
+- name: docker
+  host:
+    path: /var/run/docker.sock
+- name: libvirt
+  host:
+    path: /var/run/libvirt/
+- name: cache
+  temp: {}
\ No newline at end of file
diff --git a/Dockerfile.test b/Dockerfile.test
index fa961e8ee7de..298e9d083668 100644
--- a/Dockerfile.test
+++ b/Dockerfile.test
@@ -35,3 +35,21 @@ ENV TEST_CLEANUP true
 
 ENTRYPOINT ["./scripts/entry.sh"]
 CMD ["test"]
+
+
+FROM vagrantlibvirt/vagrant-libvirt:0.10.7 AS test-e2e
+
+RUN apt-get update && apt-get install -y docker.io
+RUN vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
+RUN vagrant box add generic/ubuntu2004 --provider libvirt --force
+RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
+    chmod +x ./kubectl; \
+    mv ./kubectl /usr/local/bin/kubectl
+ENV GO_VERSION 1.19.2
+RUN curl -O -L "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz"; \
+    rm -rf /usr/local/go; \
+    tar -C /usr/local -xzf go${GO_VERSION}.linux-amd64.tar.gz;
+
+ENV PATH="${PATH}:/usr/local/go/bin"
+
+
diff --git a/pkg/agent/loadbalancer/loadbalancer.go b/pkg/agent/loadbalancer/loadbalancer.go
index f47f4c38a38f..a9c981710254 100644
--- a/pkg/agent/loadbalancer/loadbalancer.go
+++ b/pkg/agent/loadbalancer/loadbalancer.go
@@ -56,7 +56,7 @@ var (
 	ETCDServerServiceName = version.Program + "-etcd-server-load-balancer"
 )
 
-// New contstructs a new LoadBalancer instance. The default server URL, and
+// New constructs a new LoadBalancer instance. The default server URL, and
 // currently active servers, are stored in a file within the dataDir.
 func New(ctx context.Context, dataDir, serviceName, serverURL string, lbServerPort int, isIPv6 bool) (_lb *LoadBalancer, _err error) {
 	config := net.ListenConfig{Control: reusePort}
diff --git a/scripts/test b/scripts/test
index c45b05f9cf3d..74be0a70d53c 100755
--- a/scripts/test
+++ b/scripts/test
@@ -43,26 +43,47 @@ echo "Did test-run-lazypull $?"
 
 [ "$ARCH" != 'amd64' ] && \
     early-exit "Skipping remaining tests, images not available for $ARCH."
 
-E2E_OUTPUT=$artifacts test-run-sonobuoy serial
-echo "Did test-run-sonobuoy serial $?"
-
 # ---
 
 if [ "$DRONE_BUILD_EVENT" = 'tag' ]; then
+  E2E_OUTPUT=$artifacts test-run-sonobuoy serial
+  echo "Did test-run-sonobuoy serial $?"
   E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
   echo "Did test-run-sonobuoy parallel $?"
   early-exit 'Skipping remaining tests on tag.'
 fi
 
 # ---
 
-test-run-sonobuoy etcd serial
-echo "Did test-run-sonobuoy-etcd serial $?"
-test-run-sonobuoy mysql serial
-echo "Did test-run-sonobuoy-mysqk serial $?"
-test-run-sonobuoy postgres serial
-echo "Did test-run-sonobuoy-postgres serial $?"
+if [ "$DRONE_BUILD_EVENT" = 'cron' ]; then
+  E2E_OUTPUT=$artifacts test-run-sonobuoy serial
+  echo "Did test-run-sonobuoy serial $?"
+  test-run-sonobuoy etcd serial
+  echo "Did test-run-sonobuoy-etcd serial $?"
+  test-run-sonobuoy mysql serial
+  echo "Did test-run-sonobuoy-mysql serial $?"
+  test-run-sonobuoy postgres serial
+  echo "Did test-run-sonobuoy-postgres serial $?"
+
+  # Wait until all serial tests have finished
+  delay=15
+  (
+    set +x
+    while [ $(count-running-tests) -ge 1 ]; do
+      sleep $delay
+    done
+  )
+
+  E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
+  echo "Did test-run-sonobuoy parallel $?"
+  test-run-sonobuoy etcd parallel
+  echo "Did test-run-sonobuoy-etcd parallel $?"
+  test-run-sonobuoy mysql parallel
+  echo "Did test-run-sonobuoy-mysql parallel $?"
+  test-run-sonobuoy postgres parallel
+  echo "Did test-run-sonobuoy-postgres parallel $?"
+fi
 
-# Wait until all serial tests have finished
+# Wait until all tests have finished
 delay=15
 (
 set +x
@@ -70,16 +91,5 @@ while [ $(count-running-tests) -ge 1 ]; do
     sleep $delay
 done
 )
-E2E_OUTPUT=$artifacts test-run-sonobuoy parallel
-echo "Did test-run-sonobuoy parallel $?"
-test-run-sonobuoy etcd parallel
-echo "Did test-run-sonobuoy-etcd parallel $?"
-test-run-sonobuoy mysql parallel
-echo "Did test-run-sonobuoy-mysql parallel $?"
-test-run-sonobuoy postgres parallel
-echo "Did test-run-sonobuoy-postgres parallel $?"
-
-
-
 
 exit 0
diff --git a/tests/e2e/amd64_resource_files/cluster-cidr-ipv6.yaml b/tests/e2e/amd64_resource_files/cluster-cidr-ipv6.yaml
new file mode 100644
index 000000000000..b48c41b4c0bb
--- /dev/null
+++ b/tests/e2e/amd64_resource_files/cluster-cidr-ipv6.yaml
@@ -0,0 +1,14 @@
+apiVersion: networking.k8s.io/v1alpha1
+kind: ClusterCIDR
+metadata:
+  name: new-cidr
+spec:
+  nodeSelector:
+    nodeSelectorTerms:
+    - matchExpressions:
+      - key: kubernetes.io/hostname
+        operator: In
+        values:
+        - "agent-0"
+  perNodeHostBits: 64
+  ipv6: 2001:cafe:248::/56
diff --git a/tests/e2e/amd64_resource_files/cluster-cidr.yaml b/tests/e2e/amd64_resource_files/cluster-cidr.yaml
new file mode 100644
index 000000000000..a5a559e5faa7
--- /dev/null
+++ b/tests/e2e/amd64_resource_files/cluster-cidr.yaml
@@ -0,0 +1,14 @@
+apiVersion: networking.k8s.io/v1alpha1
+kind: ClusterCIDR
+metadata:
+  name: new-cidr
+spec:
+  nodeSelector:
+    nodeSelectorTerms:
+    - matchExpressions:
+      - key: kubernetes.io/hostname
+        operator: In
+        values:
+        - "agent-0"
+  perNodeHostBits: 8
+  ipv4: 10.248.0.0/16
diff --git a/tests/e2e/multiclustercidr/Vagrantfile b/tests/e2e/multiclustercidr/Vagrantfile
new file mode 100644
index 000000000000..1655c38ec5ef
--- /dev/null
+++ b/tests/e2e/multiclustercidr/Vagrantfile
@@ -0,0 +1,155 @@
+ENV['VAGRANT_NO_PARALLEL'] = 'no'
+NODE_ROLES = (ENV['E2E_NODE_ROLES'] ||
+  ["server-0", "server-1", "server-2", "agent-0"])
+NODE_BOXES = (ENV['E2E_NODE_BOXES'] ||
+  ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004'])
+GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master")
+RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "")
+NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i
+NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 2048).to_i
+IP_FAMILY = (ENV['E2E_IP_FAMILY'] || "ipv4")
+NETWORK4_PREFIX = "10.10.10"
+NETWORK6_PREFIX = "fd11:decf:c0ff:ee"
+install_type = ""
+
+def provision(vm, roles, role_num, node_num)
+  vm.box = NODE_BOXES[node_num]
+  vm.hostname = "#{roles[0]}-#{role_num}"
+  node_ip4 = "#{NETWORK4_PREFIX}.#{100+node_num}"
+  node_ip6 = "#{NETWORK6_PREFIX}::#{10+node_num}"
+  node_ip6_gw = "#{NETWORK6_PREFIX}::1"
+  # Only works with libvirt, which allows IPv4 + IPv6 on a single network/interface
+  vm.network "private_network",
+    :ip => node_ip4,
+    :netmask => "255.255.255.0",
+    :libvirt__dhcp_enabled => false,
+    :libvirt__forward_mode => "none",
+    :libvirt__guest_ipv6 => "yes",
+    :libvirt__ipv6_address => "#{NETWORK6_PREFIX}::1",
+    :libvirt__ipv6_prefix => "64"
+
+  scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts"
+  vagrant_defaults = File.exists?("./vagrantdefaults.rb") ?
"./vagrantdefaults.rb" : "../vagrantdefaults.rb" + load vagrant_defaults + + defaultOSConfigure(vm) + + vm.provision "IPv6 Setup", type: "shell", path: scripts_location +"/ipv6.sh", args: [node_ip4, node_ip6, node_ip6_gw, vm.box.to_s] + install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) + + vm.provision "Ping Check", type: "shell", inline: "ping -c 2 k3s.io" + + if roles.include?("server") && role_num == 0 + vm.provision :k3s, run: 'once' do |k3s| + k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + k3s.args = "server " + if IP_FAMILY.include?("ipv4") + k3s.config = <<~YAML + node-external-ip: #{node_ip4} + node-ip: #{node_ip4} + cluster-init: true + token: vagrant + cluster-cidr: 10.42.0.0/16 + service-cidr: 10.43.0.0/16 + bind-address: #{NETWORK4_PREFIX}.100 + multi-cluster-cidr: true + flannel-iface: eth1 + YAML + else + k3s.config = <<~YAML + node-external-ip: #{node_ip6} + node-ip: #{node_ip6} + cluster-init: true + token: vagrant + cluster-cidr: 2001:cafe:42::/56 + service-cidr: 2001:cafe:43::/112 + bind-address: #{NETWORK6_PREFIX}::10 + multi-cluster-cidr: true + flannel-ipv6-masq: true + flannel-iface: eth1 + YAML + end + k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type] + end + elsif roles.include?("server") && role_num != 0 + vm.provision :k3s, run: 'once' do |k3s| + k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + k3s.args = "server " + if IP_FAMILY.include?("ipv4") + k3s.config = <<~YAML + node-external-ip: #{node_ip4} + node-ip: #{node_ip4} + server: https://#{NETWORK4_PREFIX}.100:6443 + token: vagrant + cluster-cidr: 10.42.0.0/16 + service-cidr: 10.43.0.0/16 + multi-cluster-cidr: true + flannel-iface: eth1 + YAML + else + k3s.config = <<~YAML + node-external-ip: #{node_ip6} + node-ip: #{node_ip6} + server: https://[#{NETWORK6_PREFIX}::10]:6443 + token: vagrant + cluster-cidr: 2001:cafe:42::/56 + service-cidr: 2001:cafe:43::/112 + multi-cluster-cidr: true + flannel-ipv6-masq: true + flannel-iface: eth1 + YAML + end + k3s.env = ["K3S_KUBECONFIG_MODE=0644", install_type] + end + end + if roles.include?("agent") + vm.provision :k3s, run: 'once' do |k3s| + k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 + k3s.args = "agent " + if IP_FAMILY.include?("ipv4") + k3s.config = <<~YAML + node-external-ip: #{node_ip4} + node-ip: #{node_ip4} + server: https://#{NETWORK4_PREFIX}.100:6443 + token: vagrant + flannel-iface: eth1 + YAML + else + k3s.config = <<~YAML + node-external-ip: #{node_ip6} + node-ip: #{node_ip6} + server: https://[#{NETWORK6_PREFIX}::10]:6443 + token: vagrant + flannel-iface: eth1 + YAML + end + k3s.env = ["K3S_KUBECONFIG_MODE=0644", "INSTALL_K3S_SKIP_START=true", install_type] + end + end +end + +Vagrant.configure("2") do |config| + config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload", "vagrant-libvirt"] + config.vm.provider "libvirt" do |v| + v.cpus = NODE_CPUS + v.memory = NODE_MEMORY + end + + if NODE_ROLES.kind_of?(String) + NODE_ROLES = NODE_ROLES.split(" ", -1) + end + if NODE_BOXES.kind_of?(String) + NODE_BOXES = NODE_BOXES.split(" ", -1) + end + + # Must iterate on the index, vagrant does not understand iterating + # over the node roles themselves + NODE_ROLES.length.times do |i| + name = NODE_ROLES[i] + config.vm.define name do |node| + roles = name.split("-", -1) + role_num = roles.pop.to_i + provision(node.vm, roles, role_num, i) + end + end +end diff --git a/tests/e2e/multiclustercidr/multiclustercidr_test.go 
b/tests/e2e/multiclustercidr/multiclustercidr_test.go new file mode 100644 index 000000000000..09b95fabc70b --- /dev/null +++ b/tests/e2e/multiclustercidr/multiclustercidr_test.go @@ -0,0 +1,281 @@ +package multiclustercidr + +import ( + "flag" + "fmt" + "os" + "strings" + "testing" + + "github.com/k3s-io/k3s/tests/e2e" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 +var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") +var serverCount = flag.Int("serverCount", 3, "number of server nodes") +var agentCount = flag.Int("agentCount", 1, "number of agent nodes") +var hardened = flag.Bool("hardened", false, "true or false") +var ci = flag.Bool("ci", false, "running on CI") + +func Test_E2EMultiClusterCIDR(t *testing.T) { + flag.Parse() + RegisterFailHandler(Fail) + suiteConfig, reporterConfig := GinkgoConfiguration() + RunSpecs(t, "MultiClusterCIDR Test Suite", suiteConfig, reporterConfig) +} + +var ( + kubeConfigFile string + serverNodeNames []string + agentNodeNames []string +) + +var _ = ReportAfterEach(e2e.GenReport) + +var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() { + + It("Starts up IPv4 setup with no issues", func() { + var err error + os.Setenv("E2E_IP_FAMILY", "ipv4") + defer os.Unsetenv("E2E_IP_FAMILY") + serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + fmt.Println("CLUSTER CONFIG") + fmt.Println("OS:", *nodeOS) + fmt.Println("Server Nodes:", serverNodeNames) + fmt.Println("Agent Nodes:", agentNodeNames) + kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Verifies that each node has IPv4", func() { + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + Expect(node.IPv4).Should(ContainSubstring("10.10.10")) + } + }) + + It("Verifies that each pod has IPv4", func() { + podIPs, err := e2e.GetPodIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range podIPs { + Expect(pod.IPv4).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.42.")), pod.Name) + } + }) + + It("Add new CIDR", func() { + _, err := e2e.DeployWorkload("cluster-cidr.yaml", kubeConfigFile, *hardened) + Expect(err).NotTo(HaveOccurred()) + Eventually(func() (string, error) { + cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile + return e2e.RunCommand(cmd) + }, "120s", "5s").Should(ContainSubstring("10.248.0.0")) + + }) + + It("Restart agent-0", func() { + 
agents := []string{"agent-0"} + err := e2e.RestartClusterAgent(agents) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Verifies that each pod of agent-0 has IPv4 from the new CIDR", func() { + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if pod.Node == "agent-0" { + Expect(pod.NodeIP).Should(Or(ContainSubstring("10.10.10"), ContainSubstring("10.248.")), pod.Name) + } + } + }) + + It("Destroy Cluster", func() { + Expect(e2e.DestroyCluster()).To(Succeed()) + Expect(os.Remove(kubeConfigFile)).To(Succeed()) + }) + + It("Starts up IPv6 setup with no issues", func() { + var err error + os.Setenv("E2E_IP_FAMILY", "ipv6") + defer os.Unsetenv("E2E_IP_FAMILY") + serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + fmt.Println("CLUSTER CONFIG") + fmt.Println("OS:", *nodeOS) + fmt.Println("Server Nodes:", serverNodeNames) + fmt.Println("Agent Nodes:", agentNodeNames) + kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Verifies that each node has IPv6", func() { + nodeIPs, err := e2e.GetNodeIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, node := range nodeIPs { + Expect(node.IPv6).Should(ContainSubstring("fd11:decf:c0ff")) + } + }) + + It("Verifies that each pod has IPv6", func() { + podIPs, err := e2e.GetPodIPs(kubeConfigFile) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range podIPs { + Expect(pod.IPv6).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:42")), pod.Name) + } + }) + + It("Add new CIDR", func() { + _, err := e2e.DeployWorkload("cluster-cidr-ipv6.yaml", kubeConfigFile, *hardened) + 
Expect(err).NotTo(HaveOccurred()) + Eventually(func() (string, error) { + cmd := "kubectl get clustercidr new-cidr --kubeconfig=" + kubeConfigFile + return e2e.RunCommand(cmd) + }, "120s", "5s").Should(ContainSubstring("2001:cafe:248")) + + }) + + It("Delete and restart agent-0", func() { + agents := []string{"agent-0"} + err := e2e.RestartClusterAgent(agents) + Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) + }) + + It("Checks Node Status", func() { + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParseNodes(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Checks Pod Status", func() { + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "420s", "5s").Should(Succeed()) + _, err := e2e.ParsePods(kubeConfigFile, true) + Expect(err).NotTo(HaveOccurred()) + }) + + It("Verifies that each pod of agent-0 has IPv6 from the new CIDR", func() { + pods, err := e2e.ParsePods(kubeConfigFile, false) + Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if pod.Node == "agent-0" { + Expect(pod.NodeIP).Should(Or(ContainSubstring("fd11:decf:c0ff"), ContainSubstring("2001:cafe:248")), pod.Name) + } + } + }) +}) + +var failed bool +var _ = AfterEach(func() { + failed = failed || CurrentSpecReport().Failed() +}) + +var _ = AfterSuite(func() { + if failed && !*ci { + fmt.Println("FAILED!") + } else { + Expect(e2e.DestroyCluster()).To(Succeed()) + Expect(os.Remove(kubeConfigFile)).To(Succeed()) + } +}) diff --git a/tests/e2e/scripts/latest_commit.sh b/tests/e2e/scripts/latest_commit.sh index d60ff3aab23f..548d96557a19 100755 --- a/tests/e2e/scripts/latest_commit.sh +++ b/tests/e2e/scripts/latest_commit.sh @@ -22,5 +22,5 @@ while [ $? 
-ne 0 ]; do fi sed -i 1d "$2" sleep 1 - curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-ci-builds/k3s-$(head -n 1 $2).sha256sum -done \ No newline at end of file + curl -s --fail https://k3s-ci-builds.s3.amazonaws.com/k3s-$(head -n 1 $2).sha256sum +done diff --git a/tests/e2e/scripts/run_tests.sh b/tests/e2e/scripts/run_tests.sh index cbf73af92f37..814b27092daf 100755 --- a/tests/e2e/scripts/run_tests.sh +++ b/tests/e2e/scripts/run_tests.sh @@ -29,9 +29,6 @@ E2E_REGISTRY=true E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v validate echo 'RUNNING SECRETS ENCRYPTION TEST' /usr/local/go/bin/go test -v secretsencryption/secretsencryption_test.go -nodeOS="$nodeOS" -serverCount=$((servercount)) -timeout=1h -json -ci | tee -a k3s_"$OS".log -echo 'RUN CLUSTER RESET TEST' -/usr/local/go/bin/go test -v clusterreset/clusterreset_test.go -nodeOS="$nodeOS" -serverCount=3 -agentCount=1 -timeout=30m -json -ci | tee -a createreport/k3s_"$OS".log - echo 'RUNNING SPLIT SERVER VALIDATION TEST' E2E_HARDENED="$hardened" /usr/local/go/bin/go test -v splitserver/splitserver_test.go -nodeOS="$nodeOS" -timeout=30m -json -ci | tee -a k3s_"$OS".log diff --git a/tests/e2e/secretsencryption/secretsencryption_test.go b/tests/e2e/secretsencryption/secretsencryption_test.go index 6d24de942dbb..1b05aa957437 100644 --- a/tests/e2e/secretsencryption/secretsencryption_test.go +++ b/tests/e2e/secretsencryption/secretsencryption_test.go @@ -16,6 +16,8 @@ import ( var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") var hardened = flag.Bool("hardened", false, "true or false") +var ci = flag.Bool("ci", false, "running on CI") +var local = flag.Bool("local", false, "deploy a locally built K3s binary") // Environment Variables Info: // E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master @@ -38,7 +40,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Secrets Keys are rotated:", func() { It("Starts up with no issues", func() { var err error - serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) + if *local { + serverNodeNames, _, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, 0) + } else { + serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) + } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) fmt.Println("CLUSTER CONFIG") fmt.Println("OS:", *nodeOS) @@ -107,7 +113,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Checks node and pod status", func() { @@ -166,7 +172,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Verifies encryption rotate stage", func() { @@ -201,7 +207,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Restarts K3s Servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed(), e2e.GetVagrantLog(nil)) }) It("Verifies Encryption Reencrypt Stage", func() { @@ -300,7 +306,7 @@ var _ = AfterEach(func() { }) var _ = AfterSuite(func() { - if failed { + if failed && 
!*ci { fmt.Println("FAILED!") } else { Expect(e2e.DestroyCluster()).To(Succeed()) diff --git a/tests/e2e/snapshotrestore/snapshotrestore_test.go b/tests/e2e/snapshotrestore/snapshotrestore_test.go index 4565bb1163e8..d01a9e44c385 100644 --- a/tests/e2e/snapshotrestore/snapshotrestore_test.go +++ b/tests/e2e/snapshotrestore/snapshotrestore_test.go @@ -6,6 +6,7 @@ import ( "os" "strings" "testing" + "time" "github.com/k3s-io/k3s/tests/e2e" . "github.com/onsi/ginkgo/v2" @@ -44,7 +45,7 @@ var ( var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { - Context("Cluster :", func() { + Context("Cluster creates snapshots and workloads:", func() { It("Starts up with no issues", func() { var err error if *local { @@ -122,6 +123,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { }, "240s", "5s").Should(Succeed()) }) + }) + Context("Cluster is reset normally", func() { It("Resets the cluster", func() { for _, nodeName := range serverNodeNames { cmd := "sudo systemctl stop k3s" @@ -168,6 +171,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { for _, nodeName := range serverNodeNames[1:] { cmd := "sudo systemctl start k3s" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) + time.Sleep(20 * time.Second) //Stagger the restarts for etcd leaners } }) @@ -176,7 +180,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { nodes, err := e2e.ParseNodes(kubeConfigFile, false) g.Expect(err).NotTo(HaveOccurred()) for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) + nodeJournal, _ := e2e.GetJournalLogs(node.Name) + g.Expect(node.Status).Should(Equal("Ready"), nodeJournal) } }, "420s", "5s").Should(Succeed()) @@ -203,6 +208,8 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { Expect(res).Should(ContainSubstring("test-nodeport")) }) + }) + Context("Cluster restores from snapshot", func() { It("Restores the snapshot", func() { //Stop k3s on all nodes for _, nodeName := range serverNodeNames { diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go index c87fa7ed4ac2..acba181d60e7 100644 --- a/tests/e2e/startup/startup_test.go +++ b/tests/e2e/startup/startup_test.go @@ -15,6 +15,7 @@ import ( // Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var ci = flag.Bool("ci", false, "running on CI") +var local = flag.Bool("local", false, "deploy a locally built K3s binary") // Environment Variables Info: // E2E_RELEASE_VERSION=v1.23.1+k3s2 or nil for latest commit from master @@ -80,7 +81,11 @@ var _ = Describe("Various Startup Configurations", Ordered, func() { Context("Verify CRI-Dockerd :", func() { It("Stands up the nodes", func() { var err error - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1) + if *local { + serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, 1, 1) + } else { + serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, 1, 1) + } Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) It("Starts K3s with no issues", func() { diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index 061c428c6763..69e7865a6549 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -25,6 +25,10 @@ type Node struct { ExternalIP string } +func (n Node) String() string { + return 
fmt.Sprintf("Node (name: %s, status: %s, roles: %s)", n.Name, n.Status, n.Roles) +} + type Pod struct { NameSpace string Name string @@ -136,6 +140,19 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri return serverNodeNames, agentNodeNames, nil } +func scpK3sBinary(nodeNames []string) error { + for _, node := range nodeNames { + cmd := fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node) + if _, err := RunCommand(cmd); err != nil { + return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) + } + if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil { + return err + } + } + return nil +} + // CreateLocalCluster creates a cluster using the locally built k3s binary. The vagrant-scp plugin must be installed for // this function to work. The binary is deployed as an airgapped install of k3s on the VMs. // This is intended only for local testing purposes when writing a new E2E test. @@ -173,14 +190,8 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ if err := errg.Wait(); err != nil { return nil, nil, err } - for _, node := range append(serverNodeNames, agentNodeNames...) { - cmd = fmt.Sprintf(`vagrant scp ../../../dist/artifacts/k3s %s:/tmp/`, node) - if _, err := RunCommand(cmd); err != nil { - return nil, nil, fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) - } - if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil { - return nil, nil, err - } + if err := scpK3sBinary(append(serverNodeNames, agentNodeNames...)); err != nil { + return nil, nil, err } // Install K3s on all nodes in parallel @@ -203,6 +214,15 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ return serverNodeNames, agentNodeNames, nil } +// Deletes the content of a manifest file previously applied +func DeleteWorkload(workload, kubeconfig string) error { + cmd := "kubectl delete -f " + workload + " --kubeconfig=" + kubeconfig + if _, err := RunCommand(cmd); err != nil { + return err + } + return nil +} + func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) { resourceDir := "../amd64_resource_files" if hardened { @@ -302,13 +322,18 @@ func GenReport(specReport ginkgo.SpecReport) { fmt.Printf("%s", status) } +func GetJournalLogs(node string) (string, error) { + cmd := "journalctl -u k3s* --no-pager" + return RunCmdOnNode(cmd, node) +} + // GetVagrantLog returns the logs of on vagrant commands that initialize the nodes and provision K3s on each node. // It also attempts to fetch the systemctl logs of K3s on nodes where the k3s.service failed. 
func GetVagrantLog(cErr error) string { var nodeErr *NodeError nodeJournal := "" if errors.As(cErr, &nodeErr) { - nodeJournal, _ = RunCmdOnNode("sudo journalctl -u k3s* --no-pager", nodeErr.Node) + nodeJournal, _ = GetJournalLogs(nodeErr.Node) nodeJournal = "\nNode Journal Logs:\n" + nodeJournal } @@ -331,7 +356,7 @@ func ParseNodes(kubeConfig string, print bool) ([]Node, error) { res, err := RunCommand(cmd) if err != nil { - return nil, err + return nil, fmt.Errorf("unable to get nodes: %s: %v", res, err) } nodeList = strings.TrimSpace(res) split := strings.Split(nodeList, "\n") @@ -391,7 +416,18 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) { // RestartCluster restarts the k3s service on each node given func RestartCluster(nodeNames []string) error { for _, nodeName := range nodeNames { - cmd := "sudo systemctl restart k3s" + cmd := "sudo systemctl restart k3s*" + if _, err := RunCmdOnNode(cmd, nodeName); err != nil { + return err + } + } + return nil +} + +// RestartCluster restarts the k3s service on each node given +func RestartClusterAgent(nodeNames []string) error { + for _, nodeName := range nodeNames { + cmd := "sudo systemctl restart k3s-agent" if _, err := RunCmdOnNode(cmd, nodeName); err != nil { return err } @@ -404,7 +440,7 @@ func RunCmdOnNode(cmd string, nodename string) (string, error) { runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename out, err := RunCommand(runcmd) if err != nil { - return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err) + return out, fmt.Errorf("failed to run command: %s on node %s: %s, %v", cmd, nodename, out, err) } return out, nil } @@ -416,19 +452,18 @@ func RunCommand(cmd string) (string, error) { return string(out), err } -func UpgradeCluster(serverNodeNames []string, agentNodeNames []string) error { - for _, nodeName := range serverNodeNames { - cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName - fmt.Println(cmd) - if out, err := RunCommand(cmd); err != nil { - fmt.Println("Error Upgrading Cluster", out) +func UpgradeCluster(nodeNames []string, local bool) error { + upgradeVersion := "E2E_RELEASE_CHANNEL=commit" + if local { + if err := scpK3sBinary(nodeNames); err != nil { return err } + upgradeVersion = "E2E_RELEASE_VERSION=skip" } - for _, nodeName := range agentNodeNames { - cmd := "E2E_RELEASE_CHANNEL=commit vagrant provision " + nodeName - if _, err := RunCommand(cmd); err != nil { - fmt.Println("Error Upgrading Cluster", err) + for _, nodeName := range nodeNames { + cmd := upgradeVersion + " vagrant provision " + nodeName + if out, err := RunCommand(cmd); err != nil { + fmt.Println("Error Upgrading Cluster", out) return err } } @@ -462,7 +497,11 @@ func GetObjIPs(cmd string) ([]ObjIP, error) { if len(fields) > 2 { objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1], IPv6: fields[2]}) } else if len(fields) > 1 { - objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1]}) + if strings.Contains(fields[1], ".") { + objIPs = append(objIPs, ObjIP{Name: fields[0], IPv4: fields[1]}) + } else { + objIPs = append(objIPs, ObjIP{Name: fields[0], IPv6: fields[1]}) + } } else { objIPs = append(objIPs, ObjIP{Name: fields[0]}) } diff --git a/tests/e2e/upgradecluster/Vagrantfile b/tests/e2e/upgradecluster/Vagrantfile index dee336bed160..1bc46adbe491 100644 --- a/tests/e2e/upgradecluster/Vagrantfile +++ b/tests/e2e/upgradecluster/Vagrantfile @@ -25,16 +25,16 @@ def provision(vm, role, role_num, node_num) load vagrant_defaults defaultOSConfigure(vm) - - if 
!RELEASE_VERSION.empty? + + if RELEASE_VERSION == "skip" + install_type = "INSTALL_K3S_SKIP_DOWNLOAD=true" + elsif !RELEASE_VERSION.empty? install_type = "INSTALL_K3S_VERSION=#{RELEASE_VERSION}" elsif RELEASE_CHANNEL == "commit" vm.provision "shell", path: "../scripts/latest_commit.sh", args: ["master", "/tmp/k3s_commits"] install_type = "INSTALL_K3S_COMMIT=$(head\ -n\ 1\ /tmp/k3s_commits)" else - vm.provision "latest version", type: "shell", - inline: "curl -w '%{url_effective}' -L -s -S https://update.k3s.io/v1-release/channels/#{RELEASE_CHANNEL} -o /dev/null | sed -e 's|.*/||' &> /tmp/k3s_version" - install_type = "INSTALL_K3S_VERSION=$(cat\ /tmp/k3s_version)" + install_type = "INSTALL_K3S_CHANNEL=#{RELEASE_CHANNEL}" end diff --git a/tests/e2e/upgradecluster/upgradecluster_test.go b/tests/e2e/upgradecluster/upgradecluster_test.go index 60e9117b98e1..5001e024bdaf 100644 --- a/tests/e2e/upgradecluster/upgradecluster_test.go +++ b/tests/e2e/upgradecluster/upgradecluster_test.go @@ -20,10 +20,11 @@ var serverCount = flag.Int("serverCount", 3, "number of server nodes") var agentCount = flag.Int("agentCount", 2, "number of agent nodes") var hardened = flag.Bool("hardened", false, "true or false") var ci = flag.Bool("ci", false, "running on CI") +var local = flag.Bool("local", false, "Controls which version k3s upgrades too, local binary or latest commit on master") // Environment Variables Info: // E2E_REGISTRY: true/false (default: false) -// Controls which K3s version is installed first, upgrade is always to latest commit +// Controls which K3s version is installed first // E2E_RELEASE_VERSION=v1.23.3+k3s1 // OR // E2E_RELEASE_CHANNEL=(commit|latest|stable), commit pulls latest commit from master @@ -249,9 +250,8 @@ var _ = Describe("Verify Upgrade", Ordered, func() { It("Upgrades with no issues", func() { var err error - err = e2e.UpgradeCluster(serverNodeNames, agentNodeNames) - fmt.Println(err) - Expect(err).NotTo(HaveOccurred()) + Expect(e2e.UpgradeCluster(append(serverNodeNames, agentNodeNames...), *local)).To(Succeed()) + Expect(e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))).To(Succeed()) fmt.Println("CLUSTER UPGRADED") kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) Expect(err).NotTo(HaveOccurred()) diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go index d2dfbddf9c77..dab6e959c439 100644 --- a/tests/e2e/validatecluster/validatecluster_test.go +++ b/tests/e2e/validatecluster/validatecluster_test.go @@ -216,6 +216,49 @@ var _ = Describe("Verify Create", Ordered, func() { }, "420s", "2s").Should(Succeed()) }) + It("Verifies Restart", func() { + _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened) + Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") + defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile) + nodes, _ := e2e.ParseNodes(kubeConfigFile, false) + + Eventually(func(g Gomega) { + pods, _ := e2e.ParsePods(kubeConfigFile, false) + count := e2e.CountOfStringInSlice("test-daemonset", pods) + g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count") + podsRunning := 0 + for _, pod := range pods { + if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" { + podsRunning++ + } + } + g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count") + }, "620s", "5s").Should(Succeed()) + errRestart := 
e2e.RestartCluster(serverNodeNames)
+		Expect(errRestart).NotTo(HaveOccurred(), "Nodes did not restart correctly")
+		if len(agentNodeNames) > 0 {
+			errRestartAgent := e2e.RestartCluster(agentNodeNames)
+			Expect(errRestartAgent).NotTo(HaveOccurred(), "Agent nodes did not restart correctly")
+		}
+		Eventually(func(g Gomega) {
+			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+			g.Expect(err).NotTo(HaveOccurred())
+			for _, node := range nodes {
+				g.Expect(node.Status).Should(Equal("Ready"))
+			}
+			pods, _ := e2e.ParsePods(kubeConfigFile, false)
+			count := e2e.CountOfStringInSlice("test-daemonset", pods)
+			g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
+			podsRunningAr := 0
+			for _, pod := range pods {
+				if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
+					podsRunningAr++
+				}
+			}
+			g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
+		}, "620s", "5s").Should(Succeed())
+	})
+
 	It("Verifies Local Path Provisioner storage ", func() {
 		res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
 		Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)