Fix preemption within cohort when there is no borrowingLimit (#1561)
* Fix borrowing during preemption when there is no borrowingLimit

* improve readability
mimowo authored Jan 9, 2024
1 parent 1664221 commit a7ad78d
Showing 2 changed files with 69 additions and 11 deletions.
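
In essence, the pre-fix workloadFits treated a missing borrowingLimit as a borrowing limit of zero: limit started at Nominal and was only raised when BorrowingLimit was non-nil, so a ClusterQueue that was allowed to borrow without a queue-level cap had its workload checked against the nominal quota alone and wrongly rejected. Below is a minimal standalone sketch of the before/after behavior — the ResourceQuota type and the oldFits/newFits helpers are illustrative simplifications, not Kueue's actual API:

```go
package main

import "fmt"

// ResourceQuota is a simplified stand-in for Kueue's per-flavor quota; the
// real code in workloadFits operates on cache.ClusterQueue instead.
type ResourceQuota struct {
	Nominal        int64
	BorrowingLimit *int64 // nil means borrowing is uncapped at the queue level
}

// oldFits mirrors the pre-fix logic: a nil BorrowingLimit left limit at
// Nominal, so an allowed borrower was checked as if it could not borrow.
func oldFits(usage, request int64, q ResourceQuota, allowBorrowing bool) bool {
	limit := q.Nominal
	if allowBorrowing && q.BorrowingLimit != nil {
		limit += *q.BorrowingLimit
	}
	return usage+request <= limit
}

// newFits mirrors the fixed logic: with borrowing allowed and no
// BorrowingLimit, the queue-level check is skipped entirely and only the
// cohort-level check (performed separately) bounds the workload.
func newFits(usage, request int64, q ResourceQuota, inCohort, allowBorrowing bool) bool {
	if !inCohort || !allowBorrowing {
		return usage+request <= q.Nominal
	}
	if q.BorrowingLimit != nil {
		return usage+request <= q.Nominal+*q.BorrowingLimit
	}
	return true
}

func main() {
	// Queue with 6 CPU nominal and no borrowingLimit; 4 CPU already used,
	// 4 more requested, so 2 CPU must be borrowed from the cohort.
	q := ResourceQuota{Nominal: 6}
	fmt.Println(oldFits(4, 4, q, true))       // false: the bug
	fmt.Println(newFits(4, 4, q, true, true)) // true: borrowing is uncapped
}
```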
25 changes: 14 additions & 11 deletions pkg/scheduler/preemption/preemption.go
@@ -361,18 +361,21 @@ func workloadFits(wlReq cache.FlavorResourceQuantities, cq *cache.ClusterQueue,
 		cohortResRequestable = cq.Cohort.RequestableResources[flvQuotas.Name]
 	}
 	for rName, rReq := range flvReq {
-		limit := flvQuotas.Resources[rName].Nominal
-
-		// No need to check whether cohort is nil here because when borrowingLimit
-		// is non nil, cohort is always non nil.
-		if allowBorrowing && flvQuotas.Resources[rName].BorrowingLimit != nil {
-			limit += *flvQuotas.Resources[rName].BorrowingLimit
+		resource := flvQuotas.Resources[rName]
+
+		if cq.Cohort == nil || !allowBorrowing {
+			if cqResUsage[rName]+rReq > resource.Nominal {
+				return false
+			}
+		} else {
+			// When resource.BorrowingLimit == nil there is no borrowing
+			// limit, so we can skip the check.
+			if resource.BorrowingLimit != nil {
+				if cqResUsage[rName]+rReq > resource.Nominal+*resource.BorrowingLimit {
+					return false
+				}
+			}
 		}
-
-		if cqResUsage[rName]+rReq > limit {
-			return false
-		}
-
 		if cq.Cohort != nil && cohortResUsage[rName]+rReq > cohortResRequestable[rName] {
 			return false
 		}
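
Note that both branches of the fixed check are still followed by the cohort-level comparison against cohortResRequestable, so skipping the queue-level check when BorrowingLimit is nil does not leave admission unbounded: the cohort's requestable resources remain the effective cap.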
55 changes: 55 additions & 0 deletions pkg/scheduler/preemption/preemption_test.go
@@ -102,6 +102,30 @@ func TestPreemption(t *testing.T) {
 				ReclaimWithinCohort: kueue.PreemptionPolicyAny,
 			}).
 			Obj(),
+		utiltesting.MakeClusterQueue("d1").
+			Cohort("cohort-no-limits").
+			ResourceGroup(*utiltesting.MakeFlavorQuotas("default").
+				Resource(corev1.ResourceCPU, "6").
+				Resource(corev1.ResourceMemory, "3Gi").
+				Obj(),
+			).
+			Preemption(kueue.ClusterQueuePreemption{
+				WithinClusterQueue:  kueue.PreemptionPolicyLowerPriority,
+				ReclaimWithinCohort: kueue.PreemptionPolicyLowerPriority,
+			}).
+			Obj(),
+		utiltesting.MakeClusterQueue("d2").
+			Cohort("cohort-no-limits").
+			ResourceGroup(*utiltesting.MakeFlavorQuotas("default").
+				Resource(corev1.ResourceCPU, "6").
+				Resource(corev1.ResourceMemory, "3Gi").
+				Obj(),
+			).
+			Preemption(kueue.ClusterQueuePreemption{
+				WithinClusterQueue:  kueue.PreemptionPolicyNever,
+				ReclaimWithinCohort: kueue.PreemptionPolicyAny,
+			}).
+			Obj(),
 		utiltesting.MakeClusterQueue("l1").
 			Cohort("legion").
 			ResourceGroup(*utiltesting.MakeFlavorQuotas("default").
@@ -540,6 +564,37 @@ func TestPreemption(t *testing.T) {
 			}),
 			wantPreempted: sets.New("/c1-low"),
 		},
+		"preempting locally and borrowing same resource in cohort; no borrowing limit in the cohort": {
+			admitted: []kueue.Workload{
+				*utiltesting.MakeWorkload("d1-med", "").
+					Priority(0).
+					Request(corev1.ResourceCPU, "4").
+					ReserveQuota(utiltesting.MakeAdmission("d1").Assignment(corev1.ResourceCPU, "default", "4").Obj()).
+					Obj(),
+				*utiltesting.MakeWorkload("d1-low", "").
+					Priority(-1).
+					Request(corev1.ResourceCPU, "4").
+					ReserveQuota(utiltesting.MakeAdmission("d1").Assignment(corev1.ResourceCPU, "default", "4").Obj()).
+					Obj(),
+				*utiltesting.MakeWorkload("d2-low-1", "").
+					Priority(-1).
+					Request(corev1.ResourceCPU, "4").
+					ReserveQuota(utiltesting.MakeAdmission("d2").Assignment(corev1.ResourceCPU, "default", "4").Obj()).
+					Obj(),
+			},
+			incoming: utiltesting.MakeWorkload("in", "").
+				Priority(1).
+				Request(corev1.ResourceCPU, "4").
+				Obj(),
+			targetCQ: "d1",
+			assignment: singlePodSetAssignment(flavorassigner.ResourceAssignment{
+				corev1.ResourceCPU: &flavorassigner.FlavorAssignment{
+					Name: "default",
+					Mode: flavorassigner.Preempt,
+				},
+			}),
+			wantPreempted: sets.New("/d1-low"),
+		},
 		"preempting locally and borrowing other resources in cohort, with cohort candidates": {
 			admitted: []kueue.Workload{
 				*utiltesting.MakeWorkload("c1-med", "").
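
For orientation, the new "cohort-no-limits" test case exercises exactly the fixed branch. A rough arithmetic walk-through as a self-contained snippet — the numbers come from the test above, the calculation itself is only illustrative:

```go
package main

import "fmt"

// Walk-through of the "cohort-no-limits" test case added above.
func main() {
	const (
		d1Nominal = 6 // ClusterQueue d1: nominal CPU, no borrowingLimit
		d2Nominal = 6 // ClusterQueue d2: nominal CPU, no borrowingLimit
		d1Med     = 4 // admitted d1-med (priority 0)
		d1Low     = 4 // admitted d1-low (priority -1, the preemption target)
		d2Low1    = 4 // admitted d2-low-1 (priority -1)
		incoming  = 4 // incoming workload "in" (priority 1) targeting d1
	)

	// Before preemption d1 runs d1-med + d1-low = 8 CPU: already borrowing
	// 2 CPU over its nominal 6, and the cohort is fully used (12 of 12).
	fmt.Println(d1Med+d1Low > d1Nominal) // true

	// After preempting d1-low, d1 would run d1-med + in = 8 CPU, still above
	// nominal, so admitting "in" requires borrowing; with no borrowingLimit
	// the fixed code defers to the cohort-level check: 4+4+4 = 12 <= 12.
	cohortRequestable := d1Nominal + d2Nominal
	cohortUsage := d1Med + incoming + d2Low1
	fmt.Println(cohortUsage <= cohortRequestable) // true => wantPreempted = {"/d1-low"}
}
```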
