fix: record provisioner requirements as valid topology domains
This allows spreading across labels constructed via provisioner
requirements
tzneal committed Jul 22, 2022
1 parent 8ab2b20 commit ea9f0fd
Showing 2 changed files with 60 additions and 0 deletions.
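In short: a provisioner may declare requirements on a label key that exists nowhere else (for example a purely synthetic spread label), and after this change the values of those requirements are recorded as valid topology domains, so pods can reference the same key in a topology spread constraint. As a rough illustration only (not code from this commit; the key name capacity.spread.4-1 is borrowed from the test added below, everything else here is assumed), the two pieces involved look like this:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func main() {
    // A requirement declared only on the provisioner; with this change its
    // values ("1".."5") are recorded as topology domains for the key.
    requirement := v1.NodeSelectorRequirement{
        Key:      "capacity.spread.4-1",
        Operator: v1.NodeSelectorOpIn,
        Values:   []string{"1", "2", "3", "4", "5"},
    }

    // Pods can then spread across those domains with an ordinary constraint.
    constraint := v1.TopologySpreadConstraint{
        TopologyKey:       "capacity.spread.4-1",
        WhenUnsatisfiable: v1.DoNotSchedule,
        MaxSkew:           1,
    }
    fmt.Println(requirement.Key == constraint.TopologyKey) // true
}

The test at the bottom of this diff exercises exactly this pairing with two provisioners.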
6 changes: 6 additions & 0 deletions pkg/controllers/provisioning/provisioner.go
@@ -199,12 +199,18 @@ func (p *Provisioner) schedule(ctx context.Context, pods []*v1.Pod) ([]*schedule
            return nil, fmt.Errorf("getting instance types, %w", err)
        }
        instanceTypes[provisioner.Name] = append(instanceTypes[provisioner.Name], instanceTypeOptions...)

        // Construct Topology Domains
        for _, instanceType := range instanceTypeOptions {
            for key, requirement := range instanceType.Requirements() {
                domains[key] = domains[key].Union(requirement.Values())
            }
        }
        for key, requirement := range scheduling.NewNodeSelectorRequirements(provisioner.Spec.Requirements...) {
            if requirement.Type() == v1.NodeSelectorOpIn {
                domains[key] = domains[key].Union(requirement.Values())
            }
        }
    }
    if len(nodeTemplates) == 0 {
        return nil, fmt.Errorf("no provisioners found")
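To summarize the hunk above: the pre-existing loop records domains discovered from instance type requirements, and the lines added by this commit also record domains from the provisioner's own spec.Requirements whenever the operator is In. A minimal sketch of that union-by-key behavior, using plain Go maps rather than Karpenter's internal requirement and set types, and with invented zone values, might look like:

package main

import "fmt"

func main() {
    // domains maps a label key to the set of values the scheduler may treat
    // as topology domains for that key.
    domains := map[string]map[string]struct{}{}

    union := func(key string, values ...string) {
        if domains[key] == nil {
            domains[key] = map[string]struct{}{}
        }
        for _, v := range values {
            domains[key][v] = struct{}{}
        }
    }

    // Values discovered from instance type requirements (pre-existing behavior).
    union("topology.kubernetes.io/zone", "us-west-2a", "us-west-2b")
    // Values declared in provisioner requirements (recorded by this commit).
    union("capacity.spread.4-1", "1")
    union("capacity.spread.4-1", "2", "3", "4", "5")

    fmt.Println(len(domains["capacity.spread.4-1"])) // 5 distinct domains
}

The real code unions only In requirements, presumably because only the In operator enumerates a concrete, finite set of domain values.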
54 changes: 54 additions & 0 deletions pkg/controllers/provisioning/scheduling/suite_test.go
@@ -1431,6 +1431,60 @@ var _ = Describe("Topology", func() {
            ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(7, 7, 7))
            ExpectSkew(ctx, env.Client, "default", &topology[1]).ToNot(ContainElements(BeNumerically(">", 3)))
        })
        It("should balance pods across provisioner requirements", func() {
            spotProv := test.Provisioner(test.ProvisionerOptions{
                Requirements: []v1.NodeSelectorRequirement{
                    {
                        Key:      v1alpha5.LabelCapacityType,
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"spot"},
                    },
                    {
                        Key:      "capacity.spread.4-1",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"2", "3", "4", "5"},
                    },
                },
            })
            onDemandProv := test.Provisioner(test.ProvisionerOptions{
                Requirements: []v1.NodeSelectorRequirement{
                    {
                        Key:      v1alpha5.LabelCapacityType,
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"on-demand"},
                    },
                    {
                        Key:      "capacity.spread.4-1",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"1"},
                    },
                },
            })

            topology := []v1.TopologySpreadConstraint{{
                TopologyKey:       "capacity.spread.4-1",
                WhenUnsatisfiable: v1.DoNotSchedule,
                LabelSelector:     &metav1.LabelSelector{MatchLabels: labels},
                MaxSkew:           1,
            }}
            ExpectApplied(ctx, env.Client, spotProv, onDemandProv)
            pods := ExpectProvisioned(ctx, env.Client, controller, MakePods(20, test.PodOptions{
                ObjectMeta:                metav1.ObjectMeta{Labels: labels},
                TopologySpreadConstraints: topology,
            })...)
            for _, p := range pods {
                ExpectScheduled(ctx, env.Client, p)
            }

            ExpectSkew(ctx, env.Client, "default", &topology[0]).To(ConsistOf(4, 4, 4, 4, 4))
            // due to the spread across provisioners, we've forced a 4:1 spot to on-demand spread
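            // 20 pods spread evenly over the five values of capacity.spread.4-1 is 4 pods per
            // domain; only domain "1" is offered by the on-demand provisioner, while "2" through
            // "5" come from the spot provisioner, so the capacity-type skew below is 4 on-demand
            // to 16 spot.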
            ExpectSkew(ctx, env.Client, "default", &v1.TopologySpreadConstraint{
                TopologyKey:       v1alpha5.LabelCapacityType,
                WhenUnsatisfiable: v1.DoNotSchedule,
                LabelSelector:     &metav1.LabelSelector{MatchLabels: labels},
                MaxSkew:           1,
            }).To(ConsistOf(4, 16))
        })
    })

    Context("Combined Hostname and Capacity Type Topology", func() {
