From 8988439e52e22340c19712443ec2aa93577578c7 Mon Sep 17 00:00:00 2001 From: Derek Nola Date: Thu, 8 Jun 2023 15:05:30 -0700 Subject: [PATCH] [Release-1.26] E2E and Dep Backports - June (#7693) * Bump docker go.mod (#7681) * Shortcircuit commands with version or help flags (#7683) * Fix for longhorn integration test * Add Rotation certification Check (#7097) * Add Certification Test to Validate Cluster * Fix to stop/start for k3s certificate rotation * E2E: Use sudo for all RunCmdOnNode * Remove unnecessary daemonset addition/deletion Signed-off-by: Derek Nola Signed-off-by: est-suse Signed-off-by: Brad Davidson Co-authored-by: Esteban Esquivel Alvarado Co-authored-by: est-suse Co-authored-by: Brad Davidson --- go.mod | 2 +- go.sum | 4 +- pkg/configfilearg/defaultparser.go | 5 +- pkg/configfilearg/parser.go | 27 ++- pkg/configfilearg/parser_test.go | 6 +- .../multiclustercidr/multiclustercidr_test.go | 4 +- tests/e2e/rotateca/rotateca_test.go | 8 +- .../secretsencryption_test.go | 36 ++-- .../snapshotrestore/snapshotrestore_test.go | 24 +-- tests/e2e/startup/startup_test.go | 19 ++- tests/e2e/testutils.go | 35 ++-- .../validatecluster/validatecluster_test.go | 155 ++++++++++++------ .../integration/longhorn/longhorn_int_test.go | 2 +- .../upgradecluster/upgradecluster_test.go | 4 +- 14 files changed, 212 insertions(+), 119 deletions(-) diff --git a/go.mod b/go.mod index 5b838c57ba7a..9195d2d0d3c6 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ replace ( github.com/containerd/stargz-snapshotter => github.com/k3s-io/stargz-snapshotter v0.13.0-k3s1 github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e github.com/docker/distribution => github.com/docker/distribution v2.8.1+incompatible - github.com/docker/docker => github.com/docker/docker v20.10.12+incompatible + github.com/docker/docker => github.com/docker/docker v20.10.24+incompatible github.com/docker/libnetwork => github.com/docker/libnetwork v0.8.0-dev.2.0.20190624125649-f0e46a78ea34 github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.9.0 diff --git a/go.sum b/go.sum index 33e9389ce0b7..a6e4be06157c 100644 --- a/go.sum +++ b/go.sum @@ -287,8 +287,8 @@ github.com/docker/cli v23.0.3+incompatible h1:Zcse1DuDqBdgI7OQDV8Go7b83xLgfhW1ez github.com/docker/cli v23.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U= -github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.24+incompatible h1:Ugvxm7a8+Gz6vqQYQQ2W7GYq5EUPaAiuPgIfVyI3dYE= +github.com/docker/docker v20.10.24+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A= github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0= diff --git a/pkg/configfilearg/defaultparser.go b/pkg/configfilearg/defaultparser.go index bbc44cb087e7..b9b6d04fca2c 100644 --- a/pkg/configfilearg/defaultparser.go +++ b/pkg/configfilearg/defaultparser.go @@ -9,7 +9,7 @@ import ( var 
DefaultParser = &Parser{ After: []string{"server", "agent", "etcd-snapshot:1"}, - FlagNames: []string{"--config", "-c"}, + ConfigFlags: []string{"--config", "-c"}, EnvName: version.ProgramUpper + "_CONFIG_FILE", DefaultConfig: "/etc/rancher/" + version.Program + "/config.yaml", ValidFlags: map[string][]cli.Flag{"server": cmds.ServerFlags, "etcd-snapshot": cmds.EtcdSnapshotFlags}, @@ -25,8 +25,7 @@ func MustParse(args []string) []string { func MustFindString(args []string, target string) string { parser := &Parser{ - After: []string{}, - FlagNames: []string{}, + OverrideFlags: []string{"--help", "-h", "--version", "-v"}, EnvName: version.ProgramUpper + "_CONFIG_FILE", DefaultConfig: "/etc/rancher/" + version.Program + "/config.yaml", } diff --git a/pkg/configfilearg/parser.go b/pkg/configfilearg/parser.go index f93e13911b83..06fce3cf2553 100644 --- a/pkg/configfilearg/parser.go +++ b/pkg/configfilearg/parser.go @@ -20,10 +20,13 @@ import ( type Parser struct { After []string - FlagNames []string + ConfigFlags []string + OverrideFlags []string EnvName string DefaultConfig string - ValidFlags map[string][]cli.Flag + // ValidFlags are maps of flags that are valid for that particular conmmand. This enables us to ignore flags in + // the config file that do no apply to the current command. + ValidFlags map[string][]cli.Flag } // Parse will parse an os.Args style slice looking for Parser.FlagNames after Parse.After. @@ -97,6 +100,12 @@ func (p *Parser) stripInvalidFlags(command string, args []string) ([]string, err } func (p *Parser) FindString(args []string, target string) (string, error) { + + // Check for --help or --version flags, which override any other flags + if val, found := p.findOverrideFlag(args); found { + return val, nil + } + configFile, isSet := p.findConfigFileFlag(args) var lastVal string if configFile != "" { @@ -140,13 +149,25 @@ func (p *Parser) FindString(args []string, target string) (string, error) { return lastVal, nil } +func (p *Parser) findOverrideFlag(args []string) (string, bool) { + for _, arg := range args { + for _, flagName := range p.OverrideFlags { + if flagName == arg { + return arg, true + } + } + } + + return "", false +} + func (p *Parser) findConfigFileFlag(args []string) (string, bool) { if envVal := os.Getenv(p.EnvName); p.EnvName != "" && envVal != "" { return envVal, true } for i, arg := range args { - for _, flagName := range p.FlagNames { + for _, flagName := range p.ConfigFlags { if flagName == arg { if len(args) > i+1 { return args[i+1], true diff --git a/pkg/configfilearg/parser_test.go b/pkg/configfilearg/parser_test.go index 66faa7586810..1dc4640ab9d8 100644 --- a/pkg/configfilearg/parser_test.go +++ b/pkg/configfilearg/parser_test.go @@ -199,7 +199,7 @@ func Test_UnitParser_findConfigFileFlag(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := Parser{ - FlagNames: []string{"--config", "-c"}, + ConfigFlags: []string{"--config", "-c"}, EnvName: "_TEST_FLAG_ENV", DefaultConfig: tt.fields.DefaultConfig, } @@ -328,7 +328,7 @@ func Test_UnitParser_Parse(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &Parser{ After: tt.fields.After, - FlagNames: tt.fields.FlagNames, + ConfigFlags: tt.fields.FlagNames, EnvName: tt.fields.EnvName, DefaultConfig: tt.fields.DefaultConfig, } @@ -447,7 +447,7 @@ func Test_UnitParser_FindString(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &Parser{ After: tt.fields.After, - FlagNames: tt.fields.FlagNames, + ConfigFlags: tt.fields.FlagNames, EnvName: tt.fields.EnvName, 
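// The parser changes above add OverrideFlags so that `--help`, `-h`, `--version`, and `-v`
// short-circuit config-file handling: FindString returns the override flag itself before any
// config file is read or parsed. A minimal usage sketch of the exported entry point shown
// above (the standalone main wrapper and example arguments are illustrative only):

package main

import (
	"fmt"

	"github.com/k3s-io/k3s/pkg/configfilearg"
)

func main() {
	// "--help" matches OverrideFlags, so it is returned as-is and no config file is consulted.
	fmt.Println(configfilearg.MustFindString([]string{"k3s", "server", "--help"}, "token"))

	// Without an override flag, the parser falls back to the K3S_CONFIG_FILE env var or
	// /etc/rancher/k3s/config.yaml and looks the target key ("token") up there.
	fmt.Println(configfilearg.MustFindString([]string{"k3s", "server"}, "token"))
}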
DefaultConfig: tt.fields.DefaultConfig, } diff --git a/tests/e2e/multiclustercidr/multiclustercidr_test.go b/tests/e2e/multiclustercidr/multiclustercidr_test.go index 09b95fabc70b..bd5b3e074460 100644 --- a/tests/e2e/multiclustercidr/multiclustercidr_test.go +++ b/tests/e2e/multiclustercidr/multiclustercidr_test.go @@ -106,7 +106,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() { It("Restart agent-0", func() { agents := []string{"agent-0"} - err := e2e.RestartClusterAgent(agents) + err := e2e.RestartCluster(agents) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) @@ -223,7 +223,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() { It("Delete and restart agent-0", func() { agents := []string{"agent-0"} - err := e2e.RestartClusterAgent(agents) + err := e2e.RestartCluster(agents) Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err)) }) diff --git a/tests/e2e/rotateca/rotateca_test.go b/tests/e2e/rotateca/rotateca_test.go index 80ddae6dabe9..29e187e80273 100644 --- a/tests/e2e/rotateca/rotateca_test.go +++ b/tests/e2e/rotateca/rotateca_test.go @@ -78,9 +78,9 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() { It("Generates New CA Certificates", func() { cmds := []string{ - "sudo mkdir -p /opt/rancher/k3s/server", - "sudo cp -r /var/lib/rancher/k3s/server/tls /opt/rancher/k3s/server", - "sudo DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh", + "mkdir -p /opt/rancher/k3s/server", + "cp -r /var/lib/rancher/k3s/server/tls /opt/rancher/k3s/server", + "DATA_DIR=/opt/rancher/k3s /tmp/generate-custom-ca-certs.sh", } for _, cmd := range cmds { _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) @@ -89,7 +89,7 @@ var _ = Describe("Verify Custom CA Rotation", Ordered, func() { }) It("Rotates CA Certificates", func() { - cmd := "sudo k3s certificate rotate-ca --path=/opt/rancher/k3s/server" + cmd := "k3s certificate rotate-ca --path=/opt/rancher/k3s/server" _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred()) }) diff --git a/tests/e2e/secretsencryption/secretsencryption_test.go b/tests/e2e/secretsencryption/secretsencryption_test.go index 5c6d86639d24..b653d95f2cde 100644 --- a/tests/e2e/secretsencryption/secretsencryption_test.go +++ b/tests/e2e/secretsencryption/secretsencryption_test.go @@ -85,7 +85,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies encryption start stage", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := range serverNodeNames { res, err := e2e.RunCmdOnNode(cmd, nodeName) Expect(err).NotTo(HaveOccurred()) @@ -96,11 +96,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Prepares for Secrets-Encryption Rotation", func() { - cmd := "sudo k3s secrets-encrypt prepare" + cmd := "k3s secrets-encrypt prepare" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) for i, nodeName := range serverNodeNames { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" res, err := e2e.RunCmdOnNode(cmd, nodeName) Expect(err).NotTo(HaveOccurred(), res) Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) @@ -140,7 +140,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies encryption prepare stage", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := 
range serverNodeNames { Eventually(func(g Gomega) { res, err := e2e.RunCmdOnNode(cmd, nodeName) @@ -153,12 +153,12 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Rotates the Secrets-Encryption Keys", func() { - cmd := "sudo k3s secrets-encrypt rotate" + cmd := "k3s secrets-encrypt rotate" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) for i, nodeName := range serverNodeNames { Eventually(func(g Gomega) { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" res, err := e2e.RunCmdOnNode(cmd, nodeName) g.Expect(err).NotTo(HaveOccurred(), res) g.Expect(res).Should(ContainSubstring("Server Encryption Hashes: hash does not match")) @@ -176,7 +176,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies encryption rotate stage", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := range serverNodeNames { Eventually(func(g Gomega) { res, err := e2e.RunCmdOnNode(cmd, nodeName) @@ -189,11 +189,11 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Reencrypts the Secrets-Encryption Keys", func() { - cmd := "sudo k3s secrets-encrypt reencrypt" + cmd := "k3s secrets-encrypt reencrypt" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt status" + cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) @@ -211,7 +211,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies Encryption Reencrypt Stage", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := range serverNodeNames { Eventually(func(g Gomega) { res, err := e2e.RunCmdOnNode(cmd, nodeName) @@ -226,15 +226,15 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Disabling Secrets-Encryption", func() { It("Disables encryption", func() { - cmd := "sudo k3s secrets-encrypt disable" + cmd := "k3s secrets-encrypt disable" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" + cmd = "k3s secrets-encrypt reencrypt -f --skip" res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt status" + cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) @@ -257,7 +257,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies encryption disabled on all nodes", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := range serverNodeNames { Eventually(func(g Gomega) { g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Disabled")) @@ -269,15 +269,15 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { Context("Enabling Secrets-Encryption", func() { It("Enables encryption", func() { - cmd := "sudo k3s secrets-encrypt enable" + cmd := "k3s secrets-encrypt enable" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) 
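// Every stage check in this suite repeats the same pattern: run one `k3s secrets-encrypt`
// subcommand on server-0, then poll `k3s secrets-encrypt status` on each server until the
// expected stage is reported. A condensed sketch of that polling step as a helper
// (hypothetical name waitForRotationStage; it assumes the dot-imported ginkgo/gomega
// packages and the tests/e2e helpers already used in this file, and relies on
// e2e.RunCmdOnNode prepending sudo):

func waitForRotationStage(serverNodeNames []string, stage string) {
	for _, nodeName := range serverNodeNames {
		nodeName := nodeName // capture the loop variable for the closure
		Eventually(func(g Gomega) {
			res, err := e2e.RunCmdOnNode("k3s secrets-encrypt status", nodeName)
			g.Expect(err).NotTo(HaveOccurred(), res)
			g.Expect(res).Should(ContainSubstring("Current Rotation Stage: " + stage))
		}, "420s", "5s").Should(Succeed())
	}
}

// Example: waitForRotationStage(serverNodeNames, "reencrypt_finished")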
Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt reencrypt -f --skip" + cmd = "k3s secrets-encrypt reencrypt -f --skip" res, err = e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred(), res) - cmd = "sudo k3s secrets-encrypt status" + cmd = "k3s secrets-encrypt status" Eventually(func() (string, error) { return e2e.RunCmdOnNode(cmd, serverNodeNames[0]) }, "180s", "5s").Should(ContainSubstring("Current Rotation Stage: reencrypt_finished")) @@ -288,7 +288,7 @@ var _ = Describe("Verify Secrets Encryption Rotation", Ordered, func() { }) It("Verifies encryption enabled on all nodes", func() { - cmd := "sudo k3s secrets-encrypt status" + cmd := "k3s secrets-encrypt status" for _, nodeName := range serverNodeNames { Eventually(func(g Gomega) { g.Expect(e2e.RunCmdOnNode(cmd, nodeName)).Should(ContainSubstring("Encryption Status: Enabled")) diff --git a/tests/e2e/snapshotrestore/snapshotrestore_test.go b/tests/e2e/snapshotrestore/snapshotrestore_test.go index d18e512c3bda..9b1320b3ce3e 100644 --- a/tests/e2e/snapshotrestore/snapshotrestore_test.go +++ b/tests/e2e/snapshotrestore/snapshotrestore_test.go @@ -102,10 +102,10 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { It("Verifies Snapshot is created", func() { Eventually(func(g Gomega) { - cmd := "sudo k3s etcd-snapshot save" + cmd := "k3s etcd-snapshot save" _, err := e2e.RunCmdOnNode(cmd, "server-0") g.Expect(err).NotTo(HaveOccurred()) - cmd = "sudo ls /var/lib/rancher/k3s/server/db/snapshots/" + cmd = "ls /var/lib/rancher/k3s/server/db/snapshots/" snapshotname, err = e2e.RunCmdOnNode(cmd, "server-0") g.Expect(err).NotTo(HaveOccurred()) fmt.Println("Snapshot Name", snapshotname) @@ -128,7 +128,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { Context("Cluster is reset normally", func() { It("Resets the cluster", func() { for _, nodeName := range serverNodeNames { - cmd := "sudo systemctl stop k3s" + cmd := "systemctl stop k3s" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) if nodeName != serverNodeNames[0] { cmd = "k3s-killall.sh" @@ -136,12 +136,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { } } - cmd := "sudo k3s server --cluster-reset" + cmd := "k3s server --cluster-reset" res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred()) Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - cmd = "sudo systemctl start k3s" + cmd = "systemctl start k3s" Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) }) @@ -165,12 +165,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { // We must remove the db directory on the other servers before restarting k3s // otherwise the nodes may join the old cluster for _, nodeName := range serverNodeNames[1:] { - cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db" + cmd := "rm -rf /var/lib/rancher/k3s/server/db" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) } for _, nodeName := range serverNodeNames[1:] { - cmd := "sudo systemctl start k3s" + cmd := "systemctl start k3s" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) time.Sleep(20 * time.Second) //Stagger the restarts for etcd leaners } @@ -214,7 +214,7 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { It("Restores the snapshot", func() { //Stop 
k3s on all nodes for _, nodeName := range serverNodeNames { - cmd := "sudo systemctl stop k3s" + cmd := "systemctl stop k3s" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) if nodeName != serverNodeNames[0] { cmd = "k3s-killall.sh" @@ -222,12 +222,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { } } //Restores from snapshot on server-0 - cmd := "sudo k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname + cmd := "k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname res, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]) Expect(err).NotTo(HaveOccurred()) Expect(res).Should(ContainSubstring("Managed etcd cluster membership has been reset, restart without --cluster-reset flag now")) - cmd = "sudo systemctl start k3s" + cmd = "systemctl start k3s" Expect(e2e.RunCmdOnNode(cmd, serverNodeNames[0])).Error().NotTo(HaveOccurred()) }) @@ -252,12 +252,12 @@ var _ = Describe("Verify snapshots and cluster restores work", Ordered, func() { // We must remove the db directory on the other servers before restarting k3s // otherwise the nodes may join the old cluster for _, nodeName := range serverNodeNames[1:] { - cmd := "sudo rm -rf /var/lib/rancher/k3s/server/db" + cmd := "rm -rf /var/lib/rancher/k3s/server/db" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) } for _, nodeName := range serverNodeNames[1:] { - cmd := "sudo systemctl start k3s" + cmd := "systemctl start k3s" Expect(e2e.RunCmdOnNode(cmd, nodeName)).Error().NotTo(HaveOccurred()) } }) diff --git a/tests/e2e/startup/startup_test.go b/tests/e2e/startup/startup_test.go index 0ea4187b8d61..087bc8a47df3 100644 --- a/tests/e2e/startup/startup_test.go +++ b/tests/e2e/startup/startup_test.go @@ -45,13 +45,13 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error var resetCmd string var startCmd string if strings.Contains(node, "server") { - resetCmd = "sudo head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" - yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) - startCmd = "sudo systemctl start k3s" + resetCmd = "head -n 3 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" + yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", serverYAML) + startCmd = "systemctl start k3s" } else { - resetCmd = "sudo head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" - yamlCmd = fmt.Sprintf("sudo echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML) - startCmd = "sudo systemctl start k3s-agent" + resetCmd = "head -n 4 /etc/rancher/k3s/config.yaml > /tmp/config.yaml && sudo mv /tmp/config.yaml /etc/rancher/k3s/config.yaml" + yamlCmd = fmt.Sprintf("echo '%s' >> /etc/rancher/k3s/config.yaml", agentYAML) + startCmd = "systemctl start k3s-agent" } if _, err := e2e.RunCmdOnNode(resetCmd, node); err != nil { return err @@ -68,9 +68,14 @@ func StartK3sCluster(nodes []string, serverYAML string, agentYAML string) error func KillK3sCluster(nodes []string) error { for _, node := range nodes { - if _, err := e2e.RunCmdOnNode("sudo k3s-killall.sh", node); err != nil { + if _, err := e2e.RunCmdOnNode("k3s-killall.sh", node); err != nil { return err } + if strings.Contains(node, "server") { + if _, err 
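// Restoring from a snapshot follows a fixed sequence in the test above: stop k3s everywhere,
// run the offline restore on the first server, wipe the etcd db directory on the remaining
// servers so they rejoin the restored cluster instead of the old one, then start them again.
// A condensed sketch of that sequence using the same commands (hypothetical helper name
// restoreFromSnapshot; RunCmdOnNode prepends sudo, so the raw commands carry no sudo prefix):

func restoreFromSnapshot(serverNodeNames []string, snapshotname string) error {
	// Offline restore on the first server, then bring it back up.
	cmd := "k3s server --cluster-init --cluster-reset --cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/" + snapshotname
	if _, err := e2e.RunCmdOnNode(cmd, serverNodeNames[0]); err != nil {
		return err
	}
	if _, err := e2e.RunCmdOnNode("systemctl start k3s", serverNodeNames[0]); err != nil {
		return err
	}
	// The other servers must drop their old db or they may rejoin the pre-restore cluster.
	for _, nodeName := range serverNodeNames[1:] {
		if _, err := e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", nodeName); err != nil {
			return err
		}
		if _, err := e2e.RunCmdOnNode("systemctl start k3s", nodeName); err != nil {
			return err
		}
	}
	return nil
}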
:= e2e.RunCmdOnNode("rm -rf /var/lib/rancher/k3s/server/db", node); err != nil { + return err + } + } } return nil } diff --git a/tests/e2e/testutils.go b/tests/e2e/testutils.go index b2ef21ed3ea4..5f5bee64df79 100644 --- a/tests/e2e/testutils.go +++ b/tests/e2e/testutils.go @@ -146,7 +146,7 @@ func scpK3sBinary(nodeNames []string) error { if _, err := RunCommand(cmd); err != nil { return fmt.Errorf("failed to scp k3s binary to %s: %v", node, err) } - if _, err := RunCmdOnNode("sudo mv /tmp/k3s /usr/local/bin/", node); err != nil { + if _, err := RunCmdOnNode("mv /tmp/k3s /usr/local/bin/", node); err != nil { return err } } @@ -214,15 +214,6 @@ func CreateLocalCluster(nodeOS string, serverCount, agentCount int) ([]string, [ return serverNodeNames, agentNodeNames, nil } -// Deletes the content of a manifest file previously applied -func DeleteWorkload(workload, kubeconfig string) error { - cmd := "kubectl delete -f " + workload + " --kubeconfig=" + kubeconfig - if _, err := RunCommand(cmd); err != nil { - return err - } - return nil -} - func DeployWorkload(workload, kubeconfig string, hardened bool) (string, error) { resourceDir := "../amd64_resource_files" if hardened { @@ -416,7 +407,7 @@ func ParsePods(kubeConfig string, print bool) ([]Pod, error) { // RestartCluster restarts the k3s service on each node given func RestartCluster(nodeNames []string) error { for _, nodeName := range nodeNames { - cmd := "sudo systemctl restart k3s* --all" + cmd := "systemctl restart k3s* --all" if _, err := RunCmdOnNode(cmd, nodeName); err != nil { return err } @@ -424,10 +415,24 @@ func RestartCluster(nodeNames []string) error { return nil } -// RestartCluster restarts the k3s service on each node given -func RestartClusterAgent(nodeNames []string) error { +// StartCluster starts the k3s service on each node given +func StartCluster(nodeNames []string) error { + for _, nodeName := range nodeNames { + cmd := "systemctl start k3s" + if strings.Contains(nodeName, "agent") { + cmd += "-agent" + } + if _, err := RunCmdOnNode(cmd, nodeName); err != nil { + return err + } + } + return nil +} + +// StopCluster starts the k3s service on each node given +func StopCluster(nodeNames []string) error { for _, nodeName := range nodeNames { - cmd := "sudo systemctl restart k3s-agent" + cmd := "systemctl stop k3s*" if _, err := RunCmdOnNode(cmd, nodeName); err != nil { return err } @@ -437,7 +442,7 @@ func RestartClusterAgent(nodeNames []string) error { // RunCmdOnNode executes a command from within the given node func RunCmdOnNode(cmd string, nodename string) (string, error) { - runcmd := "vagrant ssh -c \"" + cmd + "\" " + nodename + runcmd := "vagrant ssh " + nodename + " -c \"sudo " + cmd + "\"" out, err := RunCommand(runcmd) if err != nil { return out, fmt.Errorf("failed to run command %s on node %s: %v", cmd, nodename, err) diff --git a/tests/e2e/validatecluster/validatecluster_test.go b/tests/e2e/validatecluster/validatecluster_test.go index 6ec6fa9251be..c73631c1a65b 100644 --- a/tests/e2e/validatecluster/validatecluster_test.go +++ b/tests/e2e/validatecluster/validatecluster_test.go @@ -4,6 +4,7 @@ import ( "flag" "fmt" "os" + "regexp" "strings" "testing" @@ -43,7 +44,7 @@ var ( var _ = ReportAfterEach(e2e.GenReport) var _ = Describe("Verify Create", Ordered, func() { - Context("Cluster :", func() { + Context("Cluster Starts up and deploys basic components", func() { It("Starts up with no issues", func() { var err error if *local { @@ -99,7 +100,6 @@ var _ = Describe("Verify Create", Ordered, func() { 
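// The testutils.go change above is the reason sudo disappears from commands throughout this
// backport: RunCmdOnNode now places the node name before -c and wraps the command in sudo
// itself. A short usage sketch, assumed to run inside one of the ginkgo It blocks in these
// suites (the expanded vagrant command in the comment is illustrative):

_, err := e2e.RunCmdOnNode("systemctl restart k3s* --all", "server-0")
// Executes: vagrant ssh server-0 -c "sudo systemctl restart k3s* --all"
Expect(err).NotTo(HaveOccurred())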
clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false) cmd := "curl -L --insecure http://" + clusterip + "/name.html" - fmt.Println(cmd) for _, nodeName := range serverNodeNames { Eventually(func(g Gomega) { res, err := e2e.RunCmdOnNode(cmd, nodeName) @@ -127,7 +127,7 @@ var _ = Describe("Verify Create", Ordered, func() { }, "240s", "5s").Should(Succeed()) cmd = "curl -L --insecure http://" + nodeExternalIP + ":" + nodeport + "/name.html" - fmt.Println(cmd) + Eventually(func(g Gomega) { res, err := e2e.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res) @@ -210,55 +210,13 @@ var _ = Describe("Verify Create", Ordered, func() { Eventually(func(g Gomega) { cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default" + res, err := e2e.RunCommand(cmd) g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res) g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local")) }, "420s", "2s").Should(Succeed()) }) - It("Verifies Restart", func() { - _, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened) - Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed") - defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile) - nodes, _ := e2e.ParseNodes(kubeConfigFile, false) - - Eventually(func(g Gomega) { - pods, _ := e2e.ParsePods(kubeConfigFile, false) - count := e2e.CountOfStringInSlice("test-daemonset", pods) - g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count") - podsRunning := 0 - for _, pod := range pods { - if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" { - podsRunning++ - } - } - g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count") - }, "620s", "5s").Should(Succeed()) - errRestart := e2e.RestartCluster(serverNodeNames) - Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly") - if len(agentNodeNames) > 0 { - errRestartAgent := e2e.RestartCluster(agentNodeNames) - Expect(errRestartAgent).NotTo(HaveOccurred(), "Restart Agent not happened correctly") - } - Eventually(func(g Gomega) { - nodes, err := e2e.ParseNodes(kubeConfigFile, false) - g.Expect(err).NotTo(HaveOccurred()) - for _, node := range nodes { - g.Expect(node.Status).Should(Equal("Ready")) - } - pods, _ := e2e.ParsePods(kubeConfigFile, false) - count := e2e.CountOfStringInSlice("test-daemonset", pods) - g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count") - podsRunningAr := 0 - for _, pod := range pods { - if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" { - podsRunningAr++ - } - } - g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart") - }, "620s", "5s").Should(Succeed()) - }) - It("Verifies Local Path Provisioner storage ", func() { res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened) Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res) @@ -314,6 +272,111 @@ var _ = Describe("Verify Create", Ordered, func() { }, "180s", "2s").Should(Succeed()) }) }) + + Context("Validate restart", func() { + It("Restarts normally", func() { + errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...)) + Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly") + + 
Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + pods, _ := e2e.ParsePods(kubeConfigFile, false) + count := e2e.CountOfStringInSlice("test-daemonset", pods) + g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count") + podsRunningAr := 0 + for _, pod := range pods { + if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" { + podsRunningAr++ + } + } + g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart") + }, "620s", "5s").Should(Succeed()) + }) + }) + + Context("Valdiate Certificate Rotation", func() { + It("Stops K3s and rotates certificates", func() { + errStop := e2e.StopCluster(serverNodeNames) + Expect(errStop).NotTo(HaveOccurred(), "Cluster could not be stoped successfully") + + for _, nodeName := range serverNodeNames { + cmd := "k3s certificate rotate" + if _, err := e2e.RunCmdOnNode(cmd, nodeName); err != nil { + Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully") + } + } + }) + + It("Start normally", func() { + // Since we stopped all the server, we have to start 2 at once to get it back up + // If we only start one at a time, the first will hang waiting for the second to be up + _, err := e2e.RunCmdOnNode("systemctl --no-block start k3s", serverNodeNames[0]) + Expect(err).NotTo(HaveOccurred()) + err = e2e.StartCluster(serverNodeNames[1:]) + Expect(err).NotTo(HaveOccurred(), "Cluster could not be started successfully") + + Eventually(func(g Gomega) { + nodes, err := e2e.ParseNodes(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, node := range nodes { + g.Expect(node.Status).Should(Equal("Ready")) + } + fmt.Println("help") + }, "620s", "5s").Should(Succeed()) + + Eventually(func(g Gomega) { + pods, err := e2e.ParsePods(kubeConfigFile, false) + g.Expect(err).NotTo(HaveOccurred()) + for _, pod := range pods { + if strings.Contains(pod.Name, "helm-install") { + g.Expect(pod.Status).Should(Equal("Completed"), pod.Name) + } else { + g.Expect(pod.Status).Should(Equal("Running"), pod.Name) + } + } + }, "620s", "5s").Should(Succeed()) + }) + It("Validates certificates", func() { + const grepCert = "sudo ls -lt /var/lib/rancher/k3s/server/ | grep tls" + var expectResult = []string{"client-ca.crt", + "client-ca.key", + "client-ca.nochain.crt", + "dynamic-cert.json", "peer-ca.crt", + "peer-ca.key", "server-ca.crt", + "server-ca.key", "request-header-ca.crt", + "request-header-ca.key", "server-ca.crt", + "server-ca.key", "server-ca.nochain.crt", + "service.current.key", "service.key", + "apiserver-loopback-client__.crt", + "apiserver-loopback-client__.key", "", + } + + var finalResult string + var finalErr error + for _, nodeName := range serverNodeNames { + grCert, errGrep := e2e.RunCmdOnNode(grepCert, nodeName) + Expect(errGrep).NotTo(HaveOccurred(), "Certificate could not be created successfully") + re := regexp.MustCompile("tls-[0-9]+") + tls := re.FindAllString(grCert, -1)[0] + final := fmt.Sprintf("diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+ + "| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls) + finalResult, finalErr = e2e.RunCmdOnNode(final, nodeName) + Expect(finalErr).NotTo(HaveOccurred(), "Final Certification does not created successfully") + } + errRestartAgent := e2e.RestartCluster(agentNodeNames) + 
Expect(errRestartAgent).NotTo(HaveOccurred(), "Agent could not be restart successfully") + + finalCert := strings.Replace(finalResult, "\n", ",", -1) + finalCertArray := strings.Split(finalCert, ",") + Expect((finalCertArray)).Should((Equal(expectResult)), "Final certification does not match the expected results") + + }) + + }) }) var failed bool diff --git a/tests/integration/longhorn/longhorn_int_test.go b/tests/integration/longhorn/longhorn_int_test.go index 7bade6cc222a..e4c475e71605 100644 --- a/tests/integration/longhorn/longhorn_int_test.go +++ b/tests/integration/longhorn/longhorn_int_test.go @@ -139,7 +139,7 @@ var _ = AfterEach(func() { }) var _ = AfterSuite(func() { - if !testutil.IsExistingServer() { + if !testutil.IsExistingServer() && server != nil { if failed { testutil.K3sSaveLog(server, false) } diff --git a/tests/terraform/upgradecluster/upgradecluster_test.go b/tests/terraform/upgradecluster/upgradecluster_test.go index 629edef57099..ff8dc59faad0 100644 --- a/tests/terraform/upgradecluster/upgradecluster_test.go +++ b/tests/terraform/upgradecluster/upgradecluster_test.go @@ -322,7 +322,7 @@ var _ = Describe("Test:", func() { MIPs := strings.Split(createcluster.MasterIPs, ",") for _, ip := range MIPs { - cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd" + cmd := "sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/master_cmd" Eventually(func(g Gomega) { _, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) g.Expect(err).NotTo(HaveOccurred()) @@ -338,7 +338,7 @@ var _ = Describe("Test:", func() { WIPs := strings.Split(createcluster.WorkerIPs, ",") for _, ip := range WIPs { - cmd := "sudo sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd" + cmd := "sed -i \"s/|/| INSTALL_K3S_VERSION=" + *upgradeVersion + "/g\" /tmp/agent_cmd" Eventually(func(g Gomega) { _, err := tf.RunCmdOnNode(cmd, ip, createcluster.AwsUser, createcluster.AccessKey) g.Expect(err).NotTo(HaveOccurred())
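// After `k3s certificate rotate`, the previous certificates are kept in a timestamped backup
// directory (tls-<unix time>) alongside server/tls. The validation test above lists the
// server directory, extracts that backup name with a regexp, and diffs it against the live
// tls/ directory; the files reported as identical are compared with the expected list of CA
// material and service-account keys that rotation should leave untouched. A condensed sketch
// of the extraction step (hypothetical helper name rotatedTLSBackupDir; it assumes the same
// tests/e2e package and adds an explicit error where the test indexes the first match
// directly):

func rotatedTLSBackupDir(nodeName string) (string, error) {
	// RunCmdOnNode prepends sudo; ls -lt lists the newest entries first.
	out, err := e2e.RunCmdOnNode("ls -lt /var/lib/rancher/k3s/server/ | grep tls", nodeName)
	if err != nil {
		return "", err
	}
	matches := regexp.MustCompile(`tls-[0-9]+`).FindAllString(out, -1)
	if len(matches) == 0 {
		return "", fmt.Errorf("no rotated tls backup directory found on %s", nodeName)
	}
	return matches[0], nil
}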