diff --git a/pkg/webhooks/jobset_webhook.go b/pkg/webhooks/jobset_webhook.go
index f66395901..3cdf589f0 100644
--- a/pkg/webhooks/jobset_webhook.go
+++ b/pkg/webhooks/jobset_webhook.go
@@ -314,9 +314,9 @@ func (j *jobSetWebhook) ValidateUpdate(ctx context.Context, old, newObj runtime.
 	}
 
 	mungedSpec := js.Spec.DeepCopy()
-	// Allow pod template to be mutated for suspended JobSets.
+	// Allow the pod template to be mutated for suspended JobSets, or for JobSets that are being suspended.
 	// This is needed for integration with Kueue/DWS.
-	if ptr.Deref(oldJS.Spec.Suspend, false) {
+	if ptr.Deref(oldJS.Spec.Suspend, false) || ptr.Deref(js.Spec.Suspend, false) {
 		for index := range js.Spec.ReplicatedJobs {
 			// Pod values which must be mutable for Kueue are defined here: https://github.com/kubernetes-sigs/kueue/blob/a50d395c36a2cb3965be5232162cf1fded1bdb08/apis/kueue/v1beta1/workload_types.go#L256-L260
 			mungedSpec.ReplicatedJobs[index].Template.Spec.Template.Annotations = oldJS.Spec.ReplicatedJobs[index].Template.Spec.Template.Annotations
diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go
index 8c713a721..4bf83cfb3 100644
--- a/test/e2e/e2e_test.go
+++ b/test/e2e/e2e_test.go
@@ -18,12 +18,14 @@ package e2e
 import (
 	"context"
 	"fmt"
+	"time"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 
 	jobset "sigs.k8s.io/jobset/api/jobset/v1alpha2"
 	"sigs.k8s.io/jobset/pkg/util/testing"
@@ -131,6 +133,159 @@ var _ = ginkgo.Describe("JobSet", func() {
 		})
 	})
 
+	ginkgo.When("the JobSet is suspended and unsuspended", func() {
+		ginkgo.It("should not create Jobs while suspended, and delete Jobs on suspend", func() {
+			ctx := context.Background()
+			js := shortSleepTestJobSet(ns).Obj()
+			jsKey := types.NamespacedName{Name: js.Name, Namespace: js.Namespace}
+
+			ginkgo.By("Create a suspended JobSet", func() {
+				js.Spec.Suspend = ptr.To(true)
+				js.Spec.TTLSecondsAfterFinished = ptr.To[int32](5)
+				gomega.Expect(k8sClient.Create(ctx, js)).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Verify Jobs aren't created", func() {
+				gomega.Consistently(func() int32 {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					if js.Status.ReplicatedJobsStatus == nil {
+						return 0
+					}
+					return js.Status.ReplicatedJobsStatus[0].Active
+				}).WithTimeout(time.Second).WithPolling(200 * time.Millisecond).Should(gomega.Equal(int32(0)))
+			})
+
+			ginkgo.By("Unsuspend the JobSet, setting schedulingGates that prevent pods from being scheduled", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(false)
+					podTemplate := &js.Spec.ReplicatedJobs[0].Template.Spec.Template
+					podTemplate.Spec.SchedulingGates = []corev1.PodSchedulingGate{
+						{
+							Name: "example.com/gate",
+						},
+					}
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Wait for all Jobs to be created", func() {
+				gomega.Eventually(func() int32 {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					if js.Status.ReplicatedJobsStatus == nil {
+						return 0
+					}
+					return js.Status.ReplicatedJobsStatus[0].Active
+				}, timeout, interval).Should(gomega.Equal(js.Spec.ReplicatedJobs[0].Replicas))
+			})
+
+			ginkgo.By("Suspend the JobSet, restoring the PodTemplate properties", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(true)
+					podTemplate := &js.Spec.ReplicatedJobs[0].Template.Spec.Template
+					delete(podTemplate.Spec.NodeSelector, "kubernetes.io/hostname")
+					delete(podTemplate.Labels, "custom-label-key")
+					delete(podTemplate.Annotations, "custom-annotation-key")
+					podTemplate.Spec.SchedulingGates = nil
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Wait for all Jobs to be deleted", func() {
+				gomega.Eventually(func() int32 {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					return js.Status.ReplicatedJobsStatus[0].Active
+				}, timeout, interval).Should(gomega.Equal(int32(0)))
+			})
+
+			ginkgo.By("Unsuspend the JobSet again with a PodTemplate that allows completion", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(false)
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Wait for the JobSet to complete successfully", func() {
+				util.JobSetCompleted(ctx, k8sClient, js, timeout)
+			})
+		})
+
+		ginkgo.It("should allow quickly updating the PodTemplate on unsuspend and restoring it on suspend", func() {
+			ctx := context.Background()
+			js := shortSleepTestJobSet(ns).Obj()
+			jsKey := types.NamespacedName{Name: js.Name, Namespace: js.Namespace}
+
+			ginkgo.By("Create a suspended JobSet", func() {
+				js.Spec.Suspend = ptr.To(true)
+				js.Spec.TTLSecondsAfterFinished = ptr.To[int32](5)
+				gomega.Expect(k8sClient.Create(ctx, js)).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Unsuspend the JobSet, setting nodeSelectors that prevent pods from being scheduled", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(false)
+					podTemplate := &js.Spec.ReplicatedJobs[0].Template.Spec.Template
+					if podTemplate.Spec.NodeSelector == nil {
+						podTemplate.Spec.NodeSelector = make(map[string]string)
+					}
+					podTemplate.Spec.NodeSelector["kubernetes.io/hostname"] = "non-existing-node"
+					if podTemplate.Labels == nil {
+						podTemplate.Labels = make(map[string]string)
+					}
+					podTemplate.Labels["custom-label-key"] = "custom-label-value"
+					if podTemplate.Annotations == nil {
+						podTemplate.Annotations = make(map[string]string)
+					}
+					podTemplate.Annotations["custom-annotation-key"] = "custom-annotation-value"
+					podTemplate.Spec.SchedulingGates = []corev1.PodSchedulingGate{
+						{
+							Name: "example.com/gate",
+						},
+					}
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Wait for at least one active Job to make sure there are some running Pods", func() {
+				gomega.Eventually(func() int32 {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					if js.Status.ReplicatedJobsStatus == nil {
+						return 0
+					}
+					return js.Status.ReplicatedJobsStatus[0].Active
+				}, timeout, interval).Should(gomega.BeNumerically(">=", 1))
+			})
+
+			ginkgo.By("Suspend the JobSet, restoring the PodTemplate properties", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(true)
+					podTemplate := &js.Spec.ReplicatedJobs[0].Template.Spec.Template
+					delete(podTemplate.Spec.NodeSelector, "kubernetes.io/hostname")
+					delete(podTemplate.Labels, "custom-label-key")
+					delete(podTemplate.Annotations, "custom-annotation-key")
+					podTemplate.Spec.SchedulingGates = nil
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Unsuspend the JobSet again with a PodTemplate that allows completion", func() {
+				gomega.Eventually(func() error {
+					gomega.Expect(k8sClient.Get(ctx, jsKey, js)).Should(gomega.Succeed())
+					js.Spec.Suspend = ptr.To(false)
+					return k8sClient.Update(ctx, js)
+				}, timeout, interval).Should(gomega.Succeed())
+			})
+
+			ginkgo.By("Wait for the JobSet to complete successfully", func() {
+				util.JobSetCompleted(ctx, k8sClient, js, timeout)
+			})
+		})
+	})
+
 }) // end of Describe
 
 // getPingCommand returns ping command for 4 hostnames
@@ -144,14 +299,14 @@ do
   gotStatus="-1"
   wantStatus="0"
   while [ $gotStatus -ne $wantStatus ]
-    do 
+    do
       ping -c 1 $pod > /dev/null 2>&1
-      gotStatus=$? 
+      gotStatus=$?
      if [ $gotStatus -ne $wantStatus ]; then
        echo "Failed to ping pod $pod, retrying in 1 second..."
        sleep 1
      fi
-    done 
+    done
   echo "Successfully pinged pod: $pod"
 done
 sleep 30`, hostnames[0], hostnames[1], hostnames[2], hostnames[3])
@@ -246,3 +401,25 @@ func sleepTestJobSet(ns *corev1.Namespace) *testing.JobSetWrapper {
 		Replicas(int32(replicas)).
 		Obj())
 }
+
+func shortSleepTestJobSet(ns *corev1.Namespace) *testing.JobSetWrapper {
+	jsName := "js"
+	rjobName := "rjob"
+	replicas := 3
+	return testing.MakeJobSet(jsName, ns.Name).
+		ReplicatedJob(testing.MakeReplicatedJob(rjobName).
+			Job(testing.MakeJobTemplate("job", ns.Name).
+				PodSpec(corev1.PodSpec{
+					RestartPolicy: "Never",
+					Containers: []corev1.Container{
+						{
+							Name:    "short-sleep-test-container",
+							Image:   "bash:latest",
+							Command: []string{"bash", "-c"},
+							Args:    []string{"sleep 1"},
+						},
+					},
+				}).Obj()).
+			Replicas(int32(replicas)).
+			Obj())
+}
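
For context, a minimal sketch (not part of the diff) of the kind of update the relaxed webhook check is meant to admit: a queue manager such as Kueue/DWS flipping Suspend to true and, in the same request, reverting scheduling directives it injected on admission. The package and function names are hypothetical, and the restored keys mirror the e2e test above rather than Kueue's actual implementation; only the JobSet field paths and the webhook behavior come from the diff itself.

package example

import (
	"context"

	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"
	jobset "sigs.k8s.io/jobset/api/jobset/v1alpha2"
)

// suspendAndRestore suspends js and reverts admission-time scheduling
// directives in a single Update. With the relaxed check this is accepted
// because js.Spec.Suspend is true in the new object, even when
// oldJS.Spec.Suspend was false.
func suspendAndRestore(ctx context.Context, c client.Client, js *jobset.JobSet) error {
	js.Spec.Suspend = ptr.To(true)
	for i := range js.Spec.ReplicatedJobs {
		tmpl := &js.Spec.ReplicatedJobs[i].Template.Spec.Template
		// delete on a nil map is a no-op, so this is safe even if the
		// selector was never set (hypothetical key, as in the e2e test).
		delete(tmpl.Spec.NodeSelector, "kubernetes.io/hostname")
		tmpl.Spec.SchedulingGates = nil
	}
	// On a conflict the caller should re-fetch and retry, as the
	// gomega.Eventually blocks in the e2e tests do.
	return c.Update(ctx, js)
}

Before this change the webhook only tolerated pod-template mutations when the old object was already suspended, so a suspend-plus-restore had to be split into two separate updates.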