Add Rotation certification Check (k3s-io#7097)
* Add Certification Test to Validate Cluster

Signed-off-by: est-suse <[email protected]>

* Fix to stop/start for k3s certificate rotation

Signed-off-by: Derek Nola <[email protected]>

---------

Signed-off-by: est-suse <[email protected]>
Signed-off-by: Derek Nola <[email protected]>
Co-authored-by: est-suse <[email protected]>
Co-authored-by: Derek Nola <[email protected]>
3 people committed Jun 8, 2023
1 parent c0efafa commit dd1bf5b
Showing 3 changed files with 147 additions and 51 deletions.
4 changes: 2 additions & 2 deletions tests/e2e/multiclustercidr/multiclustercidr_test.go
@@ -106,7 +106,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {

It("Restart agent-0", func() {
agents := []string{"agent-0"}
-		err := e2e.RestartClusterAgent(agents)
+		err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})

@@ -223,7 +223,7 @@ var _ = Describe("Verify MultiClusterCIDR Configuration", Ordered, func() {

It("Delete and restart agent-0", func() {
agents := []string{"agent-0"}
-		err := e2e.RestartClusterAgent(agents)
+		err := e2e.RestartCluster(agents)
Expect(err).NotTo(HaveOccurred(), e2e.GetVagrantLog(err))
})

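Both call sites above now go through the generic e2e.RestartCluster helper instead of the removed RestartClusterAgent. RestartCluster's body is outside this diff, but judging from the StartCluster helper added in testutils.go below, the e2e helpers derive the systemd unit from the node name. A hedged sketch of that pattern, as it would sit inside tests/e2e/testutils.go alongside RunCmdOnNode (illustrative only, not the committed helper):

// restartCluster sketches the unit-selection convention used by the e2e
// helpers: server nodes run the "k3s" unit, agent nodes run "k3s-agent".
func restartCluster(nodeNames []string) error {
	for _, nodeName := range nodeNames {
		cmd := "sudo systemctl restart k3s"
		if strings.Contains(nodeName, "agent") {
			cmd += "-agent"
		}
		if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
			return err
		}
	}
	return nil
}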
20 changes: 17 additions & 3 deletions tests/e2e/testutils.go
@@ -424,10 +424,24 @@ func RestartCluster(nodeNames []string) error {
return nil
}

-// RestartCluster restarts the k3s service on each node given
-func RestartClusterAgent(nodeNames []string) error {
+// StartCluster starts the k3s service on each node given
+func StartCluster(nodeNames []string) error {
+	for _, nodeName := range nodeNames {
+		cmd := "sudo systemctl start k3s"
+		if strings.Contains(nodeName, "agent") {
+			cmd += "-agent"
+		}
+		if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// StopCluster stops the k3s service on each node given
+func StopCluster(nodeNames []string) error {
 	for _, nodeName := range nodeNames {
-		cmd := "sudo systemctl restart k3s-agent"
+		cmd := "sudo systemctl stop k3s*"
 		if _, err := RunCmdOnNode(cmd, nodeName); err != nil {
 			return err
 		}
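Taken together, StartCluster and StopCluster let a test drive the documented k3s rotation procedure: stop the service, run `k3s certificate rotate`, then start the service again. A minimal sketch of that sequence (the rotateCertificates name is hypothetical, and the import path assumes this repository's module):

package rotation

import (
	"github.com/k3s-io/k3s/tests/e2e"
)

// rotateCertificates stops k3s, rotates the cluster certificates on each
// server node, and starts the service back up.
func rotateCertificates(serverNodeNames []string) error {
	// The "k3s*" glob in StopCluster matches both the k3s and k3s-agent
	// units, so mixed node lists are safe here.
	if err := e2e.StopCluster(serverNodeNames); err != nil {
		return err
	}
	// Certificates can only be rotated while the service is down.
	for _, nodeName := range serverNodeNames {
		if _, err := e2e.RunCmdOnNode("sudo k3s certificate rotate", nodeName); err != nil {
			return err
		}
	}
	// Bring the servers back up. In an HA cluster the first server may
	// need to be started with --no-block, as the validatecluster test
	// below demonstrates.
	return e2e.StartCluster(serverNodeNames)
}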
174 changes: 128 additions & 46 deletions tests/e2e/validatecluster/validatecluster_test.go
@@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"os"
"regexp"
"strings"
"testing"

@@ -43,7 +44,7 @@ var (
var _ = ReportAfterEach(e2e.GenReport)

var _ = Describe("Verify Create", Ordered, func() {
Context("Cluster :", func() {
Context("Cluster Starts up and deploys basic components", func() {
It("Starts up with no issues", func() {
var err error
if *local {
@@ -99,7 +100,6 @@ var _ = Describe("Verify Create", Ordered, func() {

clusterip, _ := e2e.FetchClusterIP(kubeConfigFile, "nginx-clusterip-svc", false)
cmd := "curl -L --insecure http://" + clusterip + "/name.html"
-			fmt.Println(cmd)
for _, nodeName := range serverNodeNames {
Eventually(func(g Gomega) {
res, err := e2e.RunCmdOnNode(cmd, nodeName)
@@ -127,7 +127,7 @@ var _ = Describe("Verify Create", Ordered, func() {
}, "240s", "5s").Should(Succeed())

cmd = "curl -L --insecure http://" + nodeExternalIP + ":" + nodeport + "/name.html"
-			fmt.Println(cmd)
+
Eventually(func(g Gomega) {
res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
@@ -210,55 +210,13 @@ var _ = Describe("Verify Create", Ordered, func() {

Eventually(func(g Gomega) {
cmd := "kubectl --kubeconfig=" + kubeConfigFile + " exec -i -t dnsutils -- nslookup kubernetes.default"

res, err := e2e.RunCommand(cmd)
g.Expect(err).NotTo(HaveOccurred(), "failed cmd: "+cmd+" result: "+res)
g.Expect(res).Should(ContainSubstring("kubernetes.default.svc.cluster.local"))
}, "420s", "2s").Should(Succeed())
})

It("Verifies Restart", func() {
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile)
nodes, _ := e2e.ParseNodes(kubeConfigFile, false)

Eventually(func(g Gomega) {
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunning := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunning++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count")
}, "620s", "5s").Should(Succeed())
errRestart := e2e.RestartCluster(serverNodeNames)
Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")
if len(agentNodeNames) > 0 {
errRestartAgent := e2e.RestartCluster(agentNodeNames)
Expect(errRestartAgent).NotTo(HaveOccurred(), "Restart Agent not happened correctly")
}
Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunningAr := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunningAr++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
}, "620s", "5s").Should(Succeed())
})

It("Verifies Local Path Provisioner storage ", func() {
res, err := e2e.DeployWorkload("local-path-provisioner.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "local-path-provisioner manifest not deployed: "+res)
@@ -314,6 +272,130 @@ var _ = Describe("Verify Create", Ordered, func() {
}, "180s", "2s").Should(Succeed())
})
})

Context("Validate restart", func() {
It("Deletes daemonset", func() {
_, err := e2e.DeployWorkload("daemonset.yaml", kubeConfigFile, *hardened)
Expect(err).NotTo(HaveOccurred(), "Daemonset manifest not deployed")
defer e2e.DeleteWorkload("daemonset.yaml", kubeConfigFile)
nodes, _ := e2e.ParseNodes(kubeConfigFile, false)

Eventually(func(g Gomega) {
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunning := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunning++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunning)), "Daemonset running pods count does not match node count")
}, "620s", "5s").Should(Succeed())
})
It("Restarts normally", func() {
errRestart := e2e.RestartCluster(append(serverNodeNames, agentNodeNames...))
Expect(errRestart).NotTo(HaveOccurred(), "Restart Nodes not happened correctly")

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
pods, _ := e2e.ParsePods(kubeConfigFile, false)
count := e2e.CountOfStringInSlice("test-daemonset", pods)
g.Expect(len(nodes)).Should((Equal(count)), "Daemonset pod count does not match node count")
podsRunningAr := 0
for _, pod := range pods {
if strings.Contains(pod.Name, "test-daemonset") && pod.Status == "Running" && pod.Ready == "1/1" {
podsRunningAr++
}
}
g.Expect(len(nodes)).Should((Equal(podsRunningAr)), "Daemonset pods are not running after the restart")
}, "620s", "5s").Should(Succeed())
})
})

Context("Valdiate Certificate Rotation", func() {
It("Stops K3s and rotates certificates", func() {
errStop := e2e.StopCluster(serverNodeNames)
Expect(errStop).NotTo(HaveOccurred(), "Cluster could not be stoped successfully")

for _, nodeName := range serverNodeNames {
cmd := "sudo k3s certificate rotate"
if _, err := e2e.RunCmdOnNode(cmd, nodeName); err != nil {
Expect(err).NotTo(HaveOccurred(), "Certificate could not be rotated successfully")
}
}
})

It("Start normally", func() {
// Since we stopped all the server, we have to start 2 at once to get it back up
// If we only start one at a time, the first will hang waiting for the second to be up
_, err := e2e.RunCmdOnNode("sudo systemctl --no-block start k3s", serverNodeNames[0])
Expect(err).NotTo(HaveOccurred())
err = e2e.StartCluster(serverNodeNames[1:])
Expect(err).NotTo(HaveOccurred(), "Cluster could not be started successfully")

Eventually(func(g Gomega) {
nodes, err := e2e.ParseNodes(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, node := range nodes {
g.Expect(node.Status).Should(Equal("Ready"))
}
fmt.Println("help")
}, "620s", "5s").Should(Succeed())

Eventually(func(g Gomega) {
pods, err := e2e.ParsePods(kubeConfigFile, false)
g.Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
if strings.Contains(pod.Name, "helm-install") {
g.Expect(pod.Status).Should(Equal("Completed"), pod.Name)
} else {
g.Expect(pod.Status).Should(Equal("Running"), pod.Name)
}
}
}, "620s", "5s").Should(Succeed())
})
It("Validates certificates", func() {
const grepCert = "sudo ls -lt /var/lib/rancher/k3s/server/ | grep tls"
var expectResult = []string{"client-ca.crt",
"client-ca.key",
"client-ca.nochain.crt",
"dynamic-cert.json", "peer-ca.crt",
"peer-ca.key", "server-ca.crt",
"server-ca.key", "request-header-ca.crt",
"request-header-ca.key", "server-ca.crt",
"server-ca.key", "server-ca.nochain.crt",
"service.current.key", "service.key",
"apiserver-loopback-client__.crt",
"apiserver-loopback-client__.key", "",
}

var finalResult string
var finalErr error
for _, nodeName := range serverNodeNames {
grCert, errGrep := e2e.RunCmdOnNode(grepCert, nodeName)
Expect(errGrep).NotTo(HaveOccurred(), "Certificate could not be created successfully")
re := regexp.MustCompile("tls-[0-9]+")
tls := re.FindAllString(grCert, -1)[0]
final := fmt.Sprintf("sudo diff -sr /var/lib/rancher/k3s/server/tls/ /var/lib/rancher/k3s/server/%s/"+
"| grep -i identical | cut -f4 -d ' ' | xargs basename -a \n", tls)
finalResult, finalErr = e2e.RunCmdOnNode(final, nodeName)
Expect(finalErr).NotTo(HaveOccurred(), "Final Certification does not created successfully")
}
errRestartAgent := e2e.RestartCluster(agentNodeNames)
Expect(errRestartAgent).NotTo(HaveOccurred(), "Agent could not be restart successfully")

finalCert := strings.Replace(finalResult, "\n", ",", -1)
finalCertArray := strings.Split(finalCert, ",")
Expect((finalCertArray)).Should((Equal(expectResult)), "Final certification does not match the expected results")

})

})
})

var failed bool
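For context on the "Validates certificates" step: `k3s certificate rotate` leaves the previous certificates in a timestamped backup directory (tls-<unix-time>) next to /var/lib/rancher/k3s/server/tls, and the test diffs the two trees, expecting CA certificates and service keys to survive rotation unchanged while leaf certificates are re-issued. A self-contained sketch of the same comparison logic in plain Go (the identicalFiles helper is hypothetical, standing in for the shell pipeline the test runs over SSH):

package main

import (
	"fmt"
	"path/filepath"
	"regexp"
	"strings"
)

// identicalFiles parses `diff -s` output lines such as
//   Files tls/client-ca.crt and tls-1686241234/client-ca.crt are identical
// and returns the basenames of the files rotation left unchanged,
// mirroring the test's `grep -i identical | cut -f4 -d ' ' | xargs basename -a`.
func identicalFiles(diffOutput string) []string {
	var names []string
	for _, line := range strings.Split(diffOutput, "\n") {
		if !strings.Contains(strings.ToLower(line), "identical") {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) >= 4 {
			names = append(names, filepath.Base(fields[3]))
		}
	}
	return names
}

func main() {
	out := "Files /var/lib/rancher/k3s/server/tls/client-ca.crt and " +
		"/var/lib/rancher/k3s/server/tls-1686241234/client-ca.crt are identical\n"
	fmt.Println(identicalFiles(out)) // [client-ca.crt]

	// The backup directory name is recovered with the same pattern the
	// test applies to the `ls -lt` output.
	re := regexp.MustCompile("tls-[0-9]+")
	fmt.Println(re.FindString("drwx------ 4 root root 4096 tls-1686241234")) // tls-1686241234
}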

