diff --git a/.github/workflows/install.yaml b/.github/workflows/install.yaml index 570260030ae0..61f4ba103f45 100644 --- a/.github/workflows/install.yaml +++ b/.github/workflows/install.yaml @@ -6,29 +6,18 @@ on: - "channel.yaml" - "install.sh" - "tests/install/**" + - ".github/workflows/install.yaml" + pull_request: branches: [main, master] paths: - "install.sh" - "tests/install/**" + - ".github/workflows/install.yaml" workflow_dispatch: {} jobs: build: - name: Build - runs-on: ubuntu-20.04 - timeout-minutes: 20 - steps: - - name: "Checkout" - uses: actions/checkout@v3 - with: - fetch-depth: 1 - - name: "Make" - run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make - - name: "Upload k3s binary" - uses: actions/upload-artifact@v3 - with: - name: k3s - path: ./dist/artifacts/k3s + uses: ./.github/workflows/build-k3s.yaml test: name: "Smoke Test" needs: build @@ -37,7 +26,7 @@ jobs: strategy: fail-fast: false matrix: - vm: [centos-7, rocky-8, fedora, opensuse-leap, opensuse-microos, ubuntu-focal] + vm: [centos-7, rocky-8, fedora, opensuse-leap, ubuntu-focal] max-parallel: 2 defaults: run: diff --git a/.github/workflows/nightly-install.yaml b/.github/workflows/nightly-install.yaml index a1969f987a85..6266b4913451 100644 --- a/.github/workflows/nightly-install.yaml +++ b/.github/workflows/nightly-install.yaml @@ -12,7 +12,7 @@ jobs: fail-fast: false matrix: channel: [stable] - vm: [centos-7, rocky-8, fedora, opensuse-leap, opensuse-microos, ubuntu-focal] + vm: [centos-7, rocky-8, fedora, opensuse-leap, ubuntu-focal] include: - {channel: latest, vm: rocky-8} - {channel: latest, vm: ubuntu-focal} diff --git a/.github/workflows/snapshotter.yaml b/.github/workflows/snapshotter.yaml index 734cfd679237..9470fff7aff1 100644 --- a/.github/workflows/snapshotter.yaml +++ b/.github/workflows/snapshotter.yaml @@ -20,22 +20,11 @@ on: - "!.github/workflows/snapshotter.yaml" workflow_dispatch: {} jobs: - prep: - name: "Prepare" - runs-on: ubuntu-20.04 - timeout-minutes: 40 - steps: - - name: 
"Checkout" - uses: actions/checkout@v3 - with: { fetch-depth: 1 } - - name: "Build" - run: DOCKER_BUILDKIT=1 SKIP_VALIDATE=1 make - - name: "Upload Binary" - uses: actions/upload-artifact@v3 - with: { name: k3s, path: dist/artifacts/k3s } + build: + uses: ./.github/workflows/build-k3s.yaml test: name: "Smoke Test" - needs: prep + needs: build # nested virtualization is only available on macOS hosts runs-on: macos-12 timeout-minutes: 40 diff --git a/tests/e2e/clusterreset/Vagrantfile b/tests/e2e/clusterreset/Vagrantfile deleted file mode 100644 index 1bdf74e85dcb..000000000000 --- a/tests/e2e/clusterreset/Vagrantfile +++ /dev/null @@ -1,136 +0,0 @@ -ENV['VAGRANT_NO_PARALLEL'] = 'no' -NODE_ROLES = (ENV['E2E_NODE_ROLES'] || - ["server-0", "server-1", "server-2", "agent-0", "agent-1"]) -NODE_BOXES = (ENV['E2E_NODE_BOXES'] || - ['generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004', 'generic/ubuntu2004']) -GITHUB_BRANCH = (ENV['E2E_GITHUB_BRANCH'] || "master") -RELEASE_VERSION = (ENV['E2E_RELEASE_VERSION'] || "") -EXTERNAL_DB = (ENV['E2E_EXTERNAL_DB'] || "etcd") -HARDENED = (ENV['E2E_HARDENED'] || "") -NODE_CPUS = (ENV['E2E_NODE_CPUS'] || 2).to_i -NODE_MEMORY = (ENV['E2E_NODE_MEMORY'] || 1024).to_i -# Virtualbox >= 6.1.28 require `/etc/vbox/network.conf` for expanded private networks -NETWORK_PREFIX = "10.10.10" -install_type = "" -hardened_arg = "" - -def provision(vm, role, role_num, node_num) - vm.box = NODE_BOXES[node_num] - vm.hostname = role - # An expanded netmask is required to allow VM<-->VM communication, virtualbox defaults to /32 - node_ip = "#{NETWORK_PREFIX}.#{100+node_num}" - vm.network "private_network", ip: node_ip, netmask: "255.255.255.0" - - scripts_location = Dir.exists?("./scripts") ? "./scripts" : "../scripts" - vagrant_defaults = File.exists?("./vagrantdefaults.rb") ? 
"./vagrantdefaults.rb" : "../vagrantdefaults.rb" - load vagrant_defaults - - defaultOSConfigure(vm) - install_type = getInstallType(vm, RELEASE_VERSION, GITHUB_BRANCH) - - vm.provision "shell", inline: "ping -c 2 k3s.io" - - db_type = getDBType(role, role_num, vm) - - if !HARDENED.empty? - vm.provision "Set kernel parameters", type: "shell", path: scripts_location + "/harden.sh" - hardened_arg = "protect-kernel-defaults: true\nkube-apiserver-arg: \"enable-admission-plugins=NodeRestriction,PodSecurityPolicy,ServiceAccount\"" - end - - if role.include?("server") && role_num == 0 - vm.provision 'k3s-primary-server', type: 'k3s', run: 'once' do |k3s| - k3s.args = "server " - k3s.config = <<~YAML - token: vagrant - node-external-ip: #{NETWORK_PREFIX}.100 - flannel-iface: eth1 - tls-san: #{NETWORK_PREFIX}.100.nip.io - #{db_type} - #{hardened_arg} - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - - elsif role.include?("server") && role_num != 0 - vm.provision 'k3s-secondary-server', type: 'k3s', run: 'once' do |k3s| - k3s.args = "server" - k3s.config = <<~YAML - server: "https://#{NETWORK_PREFIX}.100:6443" - token: vagrant - node-external-ip: #{node_ip} - flannel-iface: eth1 - #{db_type} - #{hardened_arg} - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 K3S_TOKEN=vagrant #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - end - - if role.include?("agent") - vm.provision 'k3s-agent', type: 'k3s', run: 'once' do |k3s| - k3s.args = "agent" - k3s.config = <<~YAML - server: "https://#{NETWORK_PREFIX}.100:6443" - token: vagrant - node-external-ip: #{node_ip} - flannel-iface: eth1 - #{db_type} - #{hardened_arg} - YAML - k3s.env = %W[K3S_KUBECONFIG_MODE=0644 #{install_type}] - k3s.config_mode = '0644' # side-step https://github.com/k3s-io/k3s/issues/4321 - end - end - if vm.box.to_s.include?("microos") - vm.provision 
'k3s-reload', type: 'reload', run: 'once' - if !EXTERNAL_DB.empty? - vm.provision "shell", inline: "docker start #{EXTERNAL_DB}" - end - end -end - -def getDBType(role, role_num, vm) - if ( EXTERNAL_DB == "" || EXTERNAL_DB == "etcd" ) - if role.include?("server") && role_num == 0 - return "cluster-init: true" - end - elsif ( EXTERNAL_DB == "none" ) - # Use internal sqlite, only valid for single node clusters - else - puts "Unknown EXTERNAL_DB: " + EXTERNAL_DB - abort - end - return "" -end - -Vagrant.configure("2") do |config| - config.vagrant.plugins = ["vagrant-k3s", "vagrant-reload"] - # Default provider is libvirt, virtualbox is only provided as a backup - config.vm.provider "libvirt" do |v| - v.cpus = NODE_CPUS - v.memory = NODE_MEMORY - end - config.vm.provider "virtualbox" do |v| - v.cpus = NODE_CPUS - v.memory = NODE_MEMORY - end - - if NODE_ROLES.kind_of?(String) - NODE_ROLES = NODE_ROLES.split(" ", -1) - end - if NODE_BOXES.kind_of?(String) - NODE_BOXES = NODE_BOXES.split(" ", -1) - end - - # Must iterate on the index, vagrant does not understand iterating - # over the node roles themselves - NODE_ROLES.length.times do |i| - name = NODE_ROLES[i] - role_num = name.split("-", -1).pop.to_i - config.vm.define name do |node| - provision(node.vm, name, role_num, i) - end - end -end diff --git a/tests/e2e/clusterreset/clusterreset_test.go b/tests/e2e/clusterreset/clusterreset_test.go deleted file mode 100644 index e6ed1bc99194..000000000000 --- a/tests/e2e/clusterreset/clusterreset_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package clusterreset - -import ( - "flag" - "fmt" - "os" - "strings" - "testing" - - "github.com/k3s-io/k3s/tests/e2e" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" -) - -// Valid nodeOS: -// generic/ubuntu2004, generic/centos7, generic/rocky8, -// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 -var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") -var serverCount = flag.Int("serverCount", 3, "number of server nodes") -var agentCount = flag.Int("agentCount", 1, "number of agent nodes") -var hardened = flag.Bool("hardened", false, "true or false") -var ci = flag.Bool("ci", false, "running on CI") -var local = flag.Bool("local", false, "deploy a locally built K3s binary") - -// Environment Variables Info: -// E2E_EXTERNAL_DB: mysql, postgres, etcd (default: etcd) -// E2E_RELEASE_VERSION=v1.23.1+k3s2 (default: latest commit from master) - -func Test_E2EClusterReset(t *testing.T) { - RegisterFailHandler(Fail) - flag.Parse() - suiteConfig, reporterConfig := GinkgoConfiguration() - RunSpecs(t, "ClusterReset Test Suite", suiteConfig, reporterConfig) -} - -var ( - kubeConfigFile string - serverNodeNames []string - agentNodeNames []string -) - -var _ = ReportAfterEach(e2e.GenReport) - -var _ = Describe("Verify Create", Ordered, func() { - Context("Cluster :", func() { - It("Starts up with no issues", func() { - var err error - if *local { - serverNodeNames, agentNodeNames, err = e2e.CreateLocalCluster(*nodeOS, *serverCount, *agentCount) - } else { - serverNodeNames, agentNodeNames, err = e2e.CreateCluster(*nodeOS, *serverCount, *agentCount) - } - Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) - fmt.Println("CLUSTER CONFIG") - fmt.Println("OS:", *nodeOS) - fmt.Println("Server Nodes:", serverNodeNames) - fmt.Println("Agent Nodes:", agentNodeNames) - kubeConfigFile, err = e2e.GenKubeConfigFile(serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred()) - }) - - It("Checks Node and Pod Status", func() { - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := 
range nodes { - g.Expect(node.Status).Should(Equal("Ready")) - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - }) - - It("Verifies ClusterReset Functionality", func() { - Eventually(func(g Gomega) { - for _, nodeName := range serverNodeNames { - if nodeName != "server-0" { - cmd := "sudo systemctl stop k3s" - _, err := e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred()) - } - } - - cmd := "sudo systemctl stop k3s" - _, err := e2e.RunCmdOnNode(cmd, "server-0") - Expect(err).NotTo(HaveOccurred()) - - cmd = "sudo k3s server --cluster-reset" - res, err := e2e.RunCmdOnNode(cmd, "server-0") - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - - cmd = "sudo systemctl start k3s" - _, err = e2e.RunCmdOnNode(cmd, "server-0") - Expect(err).NotTo(HaveOccurred()) - - fmt.Printf("\nFetching node status\n") - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - if strings.Contains(node.Name, "server-0") || strings.Contains(node.Name, "agent-") { - g.Expect(node.Status).Should(Equal("Ready")) - } else { - g.Expect(node.Status).Should(Equal("NotReady")) - } - } - }, "480s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, 
false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - for _, nodeName := range serverNodeNames { - if nodeName != "server-0" { - cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db" - _, err := e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred()) - cmd = "sudo systemctl restart k3s" - _, err = e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred()) - } - } - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParseNodes(kubeConfigFile, true) - - fmt.Printf("\nFetching Pods status\n") - Eventually(func(g Gomega) { - pods, err := e2e.ParsePods(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, pod := range pods { - if strings.Contains(pod.Name, "helm-install") { - g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) - } else { - g.Expect(pod.Status).Should(Equal("Running"), pod.Name) - } - } - }, "420s", "5s").Should(Succeed()) - _, _ = e2e.ParsePods(kubeConfigFile, true) - }, "240s", "5s").Should(Succeed()) - }) - }) -}) - -var failed bool -var _ = AfterEach(func() { - failed = failed || CurrentSpecReport().Failed() -}) - -var _ = AfterSuite(func() { - if failed && !*ci { - fmt.Println("FAILED!") - } else { - Expect(e2e.DestroyCluster()).To(Succeed()) - Expect(os.Remove(kubeConfigFile)).To(Succeed()) - } -}) diff --git a/tests/e2e/docker/docker_test.go b/tests/e2e/docker/docker_test.go index 47e13e51c9ec..b823950eff63 100644 --- a/tests/e2e/docker/docker_test.go +++ b/tests/e2e/docker/docker_test.go @@ -12,7 +12,7 @@ import ( . 
"github.com/onsi/gomega" ) -// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 1, "number of server nodes") var agentCount = flag.Int("agentCount", 1, "number of agent nodes") diff --git a/tests/e2e/preferbundled/preferbundled_test.go b/tests/e2e/preferbundled/preferbundled_test.go index 3679399477e9..95e10b36ae44 100644 --- a/tests/e2e/preferbundled/preferbundled_test.go +++ b/tests/e2e/preferbundled/preferbundled_test.go @@ -12,7 +12,7 @@ import ( . "github.com/onsi/gomega" ) -// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 1, "number of server nodes") var agentCount = flag.Int("agentCount", 1, "number of agent nodes") diff --git a/tests/e2e/secretsencryption/secretsencryption_test.go b/tests/e2e/secretsencryption/secretsencryption_test.go index c4a13643c0a8..6ce4ac2c6740 100644 --- a/tests/e2e/secretsencryption/secretsencryption_test.go +++ b/tests/e2e/secretsencryption/secretsencryption_test.go @@ -12,7 +12,7 @@ import ( . 
"github.com/onsi/gomega" ) -// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") var hardened = flag.Bool("hardened", false, "true or false") @@ -35,7 +35,7 @@ var ( var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { - Context("Cluster :", func() { + Context("Secrets Keys are rotated:", func() { It("Starts up with no issues", func() { var err error serverNodeNames, _, err = e2e.CreateCluster(*nodeOS, *serverCount, 0) @@ -218,73 +218,78 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) }) - It("Disables encryption", func() { - cmd := "sudo k3s secrets-encrypt disable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred(), res) + Context("Disabling Secrets-Encryption", func() { + It("Disables encryption", func() { + cmd := "sudo k3s secrets-encrypt disable" + res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" - res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred(), res) + cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" + res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt status" - Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) + cmd = "sudo k3s secrets-encrypt status" + Eventually(func() (string, error) { + return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) - for i, nodeName 
:= range serverNodeNames { - Eventually(func(g Gomega) { - res, err := e2e.RunCmdOnNode(cmd, nodeName) - g.Expect(err).NotTo(HaveOccurred(), res) - if i == 0 { - g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled")) - } else { - g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) - } - }, "420s", "2s").Should(Succeed()) - } - }) + for i, nodeName := range serverNodeNames { + Eventually(func(g Gomega) { + res, err := e2e.RunCmdOnNode(cmd, nodeName) + g.Expect(err).NotTo(HaveOccurred(), res) + if i == 0 { + g.Expect(res).Should(ContainSubstring("Encryption Status: Disabled")) + } else { + g.Expect(res).Should(ContainSubstring("Encryption Status: Enabled")) + } + }, "420s", "2s").Should(Succeed()) + } + }) - It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) - }) + It("Restarts K3s servers", func() { + Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + }) + + It("Verifies encryption disabled on all nodes", func() { + cmd := "sudo k3s secrets-encrypt status" + for _, nodeName := range serverNodeNames { + Eventually(func(g Gomega) { + g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled")) + }, "420s", "2s").Should(Succeed()) + } + }) - It("Verifies encryption disabled on all nodes", func() { - cmd := "sudo k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { - Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled")) - }, "420s", "2s").Should(Succeed()) - } }) - It("Enables encryption", func() { - cmd := "sudo k3s secrets-encrypt enable" - res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred(), res) + Context("Enabling Secrets-Encryption", func() { + It("Enables encryption", func() { + cmd := "sudo k3s secrets-encrypt enable" + res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred(), res) 
- cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" - res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - Expect(err).NotTo(HaveOccurred(), res) + cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" + res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt status" - Eventually(func() (string, error) { - return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) - }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) - }) + cmd = "sudo k3s secrets-encrypt status" + Eventually(func() (string, error) { + return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) + }) - It("Restarts K3s servers", func() { - Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) - }) + It("Restarts K3s servers", func() { + Expect(e2e.RestartCluster(serverNodeNames)).To(Succeed()) + }) - It("Verifies encryption enabled on all nodes", func() { - cmd := "sudo k3s secrets-encrypt status" - for _, nodeName := range serverNodeNames { - Eventually(func(g Gomega) { - g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled")) - }, "420s", "2s").Should(Succeed()) - } + It("Verifies encryption enabled on all nodes", func() { + cmd := "sudo k3s secrets-encrypt status" + for _, nodeName := range serverNodeNames { + Eventually(func(g Gomega) { + g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled")) + }, "420s", "2s").Should(Succeed()) + } + }) }) }) diff --git a/tests/e2e/snapshotrestore/snapshotrestore_test.go b/tests/e2e/snapshotrestore/snapshotrestore_test.go index a998796a2942..4565bb1163e8 100644 --- a/tests/e2e/snapshotrestore/snapshotrestore_test.go +++ b/tests/e2e/snapshotrestore/snapshotrestore_test.go @@ -14,7 +14,7 @@ import ( // Valid nodeOS: // generic/ubuntu2004, generic/centos7, generic/rocky8, -// 
opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") @@ -43,7 +43,7 @@ var ( var _ = ReportAfterEach(e2e.GenReport) -var _ = Describe("Verify Create", Ordered, func() { +var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { Context("Cluster :", func() { It("Starts up with no issues", func() { var err error @@ -122,26 +122,139 @@ var _ = Describe("Verify Create", Ordered, func() { }, "240s", "5s").Should(Succeed()) }) - It("Verifies snapshot is restored successfully and validates only test workload1 is present", func() { + It("Resets the cluster", func() { + for _, nodeName := range serverNodeNames { + cmd := "sudo systemctl stop k3s" + Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) + if nodeName != serverNodeNames[0] { + cmd = "k3s-killall.sh" + Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) + } + } + + cmd := "sudo k3s server --cluster-reset" + res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) + + cmd = "sudo systemctl start k3s" + Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) + }) + + It("Checks that other servers are not ready", func() { + fmt.Printf("\nFetching node status\n") + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") { + g.Expect(node.Status).Should(Equal("Ready")) + } else { + g.Expect(node.Status).Should(Equal("NotReady")) + } + } + }, "240s", "5s").Should(Succeed()) + _, _ = e2e.ParseNodes(kubeConfigFile, 
true)
+	})
+
+	It("Rejoins other servers to cluster", func() {
+		// We must remove the db directory on the other servers before restarting k3s
+		// otherwise the nodes may join the old cluster
+		for _, nodeName := range serverNodeNames[1:] {
+			cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db"
+			Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
+		}
+
+		for _, nodeName := range serverNodeNames[1:] {
+			cmd := "sudo systemctl start k3s"
+			Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
+		}
+	})
+
+	It("Checks that all nodes and pods are ready", func() {
+		Eventually(func(g Gomega) {
+			nodes, err := e2e.ParseNodes(kubeConfigFile, false)
+			g.Expect(err).NotTo(HaveOccurred())
+			for _, node := range nodes {
+				g.Expect(node.Status).Should(Equal("Ready"))
+			}
+		}, "420s", "5s").Should(Succeed())
+
+		_, _ = e2e.ParseNodes(kubeConfigFile, true)
+
+		fmt.Printf("\nFetching Pods status\n")
+		Eventually(func(g Gomega) {
+			pods, err := e2e.ParsePods(kubeConfigFile, false)
+			g.Expect(err).NotTo(HaveOccurred())
+			for _, pod := range pods {
+				if strings.Contains(pod.Name, "helm-install") {
+					g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
+				} else {
+					g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
+				}
+			}
+		}, "420s", "5s").Should(Succeed())
+	})
+	It("Verifies that workload1 and workload2 exist", func() {
+		cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile
+		res, err := e2e.RunCommand(cmd)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(res).Should(ContainSubstring("test-clusterip"))
+		Expect(res).Should(ContainSubstring("test-nodeport"))
+	})
+
+	It("Restores the snapshot", func() {
 		//Stop k3s on all nodes
 		for _, nodeName := range serverNodeNames {
 			cmd := "sudo systemctl stop k3s"
-			_, err := e2e.RunCmdOnNode(cmd, nodeName)
-			Expect(err).NotTo(HaveOccurred())
+			Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred())
+			if nodeName != serverNodeNames[0] {
+				cmd = "k3s-killall.sh"
+				
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) + } } //Restores from snapshot on server-0 - for _, nodeName := range serverNodeNames { - if nodeName == "server-0" { - cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname - res, err := e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred()) - Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - - cmd = "sudo systemctl start k3s" - _, err = e2e.RunCmdOnNode(cmd, nodeName) - Expect(err).NotTo(HaveOccurred()) + cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname + res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) + + cmd = "sudo systemctl start k3s" + Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) + + }) + + It("Checks that other servers are not ready", func() { + fmt.Printf("\nFetching node status\n") + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + if strings.Contains(node.Name, serverNodeNames[0]) || strings.Contains(node.Name, "agent-") { + g.Expect(node.Status).Should(Equal("Ready")) + } else { + g.Expect(node.Status).Should(Equal("NotReady")) + } } + }, "240s", "5s").Should(Succeed()) + _, _ = e2e.ParseNodes(kubeConfigFile, true) + }) + + It("Rejoins other servers to cluster", func() { + // We must remove the db directory on the other servers before restarting k3s + // otherwise the nodes may join the old cluster + for _, nodeName := range serverNodeNames[1:] { + cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db" + 
Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) + } + + for _, nodeName := range serverNodeNames[1:] { + cmd := "sudo systemctl start k3s" + Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) } + }) + + It("Checks that all nodes and pods are ready", func() { //Verifies node is up and pods running fmt.Printf("\nFetching node status\n") Eventually(func(g Gomega) { @@ -166,8 +279,9 @@ var _ = Describe("Verify Create", Ordered, func() { } }, "620s", "5s").Should(Succeed()) _, _ = e2e.ParsePods(kubeConfigFile, true) - //Verifies test workload1 is present - //Verifies test workload2 is not present + }) + + It("Verifies that workload1 exists and workload2 does not", func() { cmd := "kubectl get pods --kubeconfig=" + kubeConfigFile res, err := e2e.RunCommand(cmd) Expect(err).NotTo(HaveOccurred()) diff --git a/tests/e2e/splitserver/splitserver_test.go b/tests/e2e/splitserver/splitserver_test.go index 044f590ec0ae..70cdc43ff9da 100644 --- a/tests/e2e/splitserver/splitserver_test.go +++ b/tests/e2e/splitserver/splitserver_test.go @@ -13,7 +13,7 @@ import ( . 
"github.com/onsi/gomega" ) -// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// Valid nodeOS: generic/ubuntu2004, opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var etcdCount = flag.Int("etcdCount", 1, "number of server nodes only deploying etcd") var controlPlaneCount = flag.Int("controlPlaneCount", 1, "number of server nodes acting as control plane") diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index 9b80db83fb0f..02b4a9b485f7 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -123,7 +123,11 @@ func CreateCluster(nodeOS string, serverCount, agentCount int) ([]string, []stri return nil }) // We must wait a bit between provisioning nodes to avoid too many learners attempting to join the cluster - time.Sleep(20 * time.Second) + if strings.Contains(node, "agent") { + time.Sleep(5 * time.Second) + } else { + time.Sleep(30 * time.Second) + } } if err := errg.Wait(); err != nil { return nil, nil, err diff --git a/tests/e2e/upgradecluster/upgradecluster_test.go b/tests/e2e/upgradecluster/upgradecluster_test.go index 9c271b2afdf8..60e9117b98e1 100644 --- a/tests/e2e/upgradecluster/upgradecluster_test.go +++ b/tests/e2e/upgradecluster/upgradecluster_test.go @@ -14,7 +14,7 @@ import ( // Valid nodeOS: // generic/ubuntu2004, generic/centos7, generic/rocky8 -// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") var agentCount = flag.Int("agentCount", 2, "number of agent nodes") diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go index b1bc8fbaf68f..d2dfbddf9c77 100644 --- a/tests/e2e/validatecluster/validatecluster_test.go +++ b/tests/e2e/validatecluster/validatecluster_test.go @@ -14,7 +14,7 @@ import ( // 
Valid nodeOS: // generic/ubuntu2004, generic/centos7, generic/rocky8, -// opensuse/Leap-15.3.x86_64, dweomer/microos.amd64 +// opensuse/Leap-15.3.x86_64 var nodeOS = flag.String("nodeOS", "generic/ubuntu2004", "VM operating system") var serverCount = flag.Int("serverCount", 3, "number of server nodes") var agentCount = flag.Int("agentCount", 2, "number of agent nodes")