tests: wait for new managers to become available
In the current code, the setRange and deleteLeaderManager functions
do not wait for the old pods to disappear and for new ones to become
available. Because of that, tests using these functions have no
webhook available or, worse, are served by the old one.

This issue was not exposed because of the Eventually blocks we have
around all the Create calls.
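
(For illustration: a minimal sketch of the kind of wrapper meant here, reusing the testClient, anotherVm and Gomega Eventually names that appear in the diff below; this snippet is not part of the commit. Because the Create call is retried for the whole timeout, a window in which no webhook pod is serving only slows the test down instead of failing it.)

    Eventually(func() error {
        // Retried until it succeeds, so a temporarily unavailable admission
        // webhook is silently absorbed by the retries.
        return testClient.VirtClient.Create(context.TODO(), anotherVm)
    }, 30*time.Second, 3*time.Second).Should(Not(HaveOccurred()), "failed to create the VM")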

This commit makes sure the old pods are removed, but since the
readiness probe proposed in
k8snetworkplumbingwg#84
is not merged yet, a sleep is used here.
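
(A rough sketch of what the sleep stands in for once that readiness probe exists: the wait could poll the PodReady condition instead of a fixed 40 seconds. The helper below is hypothetical, not part of this commit, and assumes the same context-less client-go List call used in the diff below.)

    // allManagerPodsReady reports whether every pod in the given namespace has
    // the PodReady condition set to True. Hypothetical helper, for illustration only.
    func allManagerPodsReady(client kubernetes.Interface, namespace string) (bool, error) {
        pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
        if err != nil {
            return false, err
        }
        for _, pod := range pods.Items {
            ready := false
            for _, cond := range pod.Status.Conditions {
                if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue {
                    ready = true
                    break
                }
            }
            if !ready {
                return false, nil
            }
        }
        return true, nil
    }

    (assumed imports: corev1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/kubernetes")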

Signed-off-by: Petr Horacek <[email protected]>
Signed-off-by: Alona Kaplan <[email protected]>
phoracek authored and AlonaKaplan committed Jan 12, 2020
1 parent c4504ef commit b792a2c
Showing 2 changed files with 29 additions and 11 deletions.
38 changes: 28 additions & 10 deletions tests/tests.go
@@ -123,42 +123,57 @@ func setRange(rangeStart, rangeEnd string) error {
         return err
     }
 
-    podsList, err := testClient.KubeClient.CoreV1().Pods(ManagerNamespce).List(metav1.ListOptions{})
+    oldPods, err := testClient.KubeClient.CoreV1().Pods(ManagerNamespce).List(metav1.ListOptions{})
     if err != nil {
         return err
     }
 
-    for _, pod := range podsList.Items {
+    for _, pod := range oldPods.Items {
         err = testClient.KubeClient.CoreV1().Pods(ManagerNamespce).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
         if err != nil {
             return err
         }
     }
 
+    deployment, err := testClient.KubeClient.AppsV1().Deployments(ManagerNamespce).Get(names.MANAGER_DEPLOYMENT, metav1.GetOptions{})
+    if err != nil {
+        return err
+    }
+
     Eventually(func() error {
-        podsList, err = testClient.KubeClient.CoreV1().Pods(ManagerNamespce).List(metav1.ListOptions{})
+        currentPods, err := testClient.KubeClient.CoreV1().Pods(ManagerNamespce).List(metav1.ListOptions{})
         if err != nil {
             return err
         }
 
-        if len(podsList.Items) != 2 {
-            return fmt.Errorf("should have two manager pods")
+        for _, currentPod := range currentPods.Items {
+            for _, oldPod := range oldPods.Items {
+                if currentPod.Name == oldPod.Name {
+                    return fmt.Errorf("old pod %s has not yet been removed", oldPod.Name)
+                }
+            }
         }
 
+        if len(currentPods.Items) != int(deployment.Status.Replicas) {
+            return fmt.Errorf("should have %v manager pods", deployment.Status.Replicas)
+        }
+
-        for _, pod := range podsList.Items {
-            if pod.Status.Phase != corev1.PodRunning {
-                return fmt.Errorf("manager pod not ready")
+        for _, currentPod := range currentPods.Items {
+            if currentPod.Status.Phase != corev1.PodRunning {
+                return fmt.Errorf("manager pod not running")
             }
         }
+        // This sleep is temporary and horrible until we have a readiness probe - https://github.com/k8snetworkplumbingwg/kubemacpool/pull/84/
+        time.Sleep(40 * time.Second)
 
         return nil
 
-    }, 30*time.Second, 3*time.Second).Should(Not(HaveOccurred()), "failed to get kubemacpool manager pod")
+    }, 2*time.Minute, 3*time.Second).Should(Not(HaveOccurred()), "failed to start new set of manager pods within the given timeout")
 
     return nil
 }
 
-func DeleteLeaderManager() {
+func deleteLeaderManager() {
     pods, err := testClient.KubeClient.CoreV1().Pods(ManagerNamespce).List(metav1.ListOptions{})
     Expect(err).ToNot(HaveOccurred())
 
@@ -183,6 +198,9 @@ func DeleteLeaderManager() {
 
         return false
     }, 30*time.Second, 3*time.Second).Should(BeTrue(), "failed to delete kubemacpool leader pod")
+
+    // This sleep is temporary and horrible until we have a readiness probe - https://github.com/k8snetworkplumbingwg/kubemacpool/pull/84/
+    time.Sleep(40 * time.Second)
 }
 
 func BeforeAll(fn func()) {
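
(For context, a hypothetical caller of setRange — the BeforeEach hook and the MAC range values below are illustrative, not taken from the repository — now only proceeds once the new manager pods are serving:)

    BeforeEach(func() {
        err := setRange("02:00:00:00:00:00", "02:FF:FF:FF:FF:FF")
        Expect(err).ToNot(HaveOccurred())
    })
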
2 changes: 1 addition & 1 deletion tests/virtual_machines_test.go
@@ -478,7 +478,7 @@ var _ = Describe("Virtual Machines", func() {
             Expect(err).ToNot(HaveOccurred())
 
             By("deleting leader manager")
-            DeleteLeaderManager()
+            deleteLeaderManager()
 
             Eventually(func() error {
                 return testClient.VirtClient.Create(context.TODO(), anotherVm)
