Skip to content

Commit

Permalink
Add sidecar unit tests and prefilter
Browse files Browse the repository at this point in the history
Signed-off-by: Monokaix <[email protected]>
  • Loading branch information
Monokaix committed Sep 3, 2024
1 parent 2c62ef5 commit 16cb70c
Show file tree
Hide file tree
Showing 5 changed files with 264 additions and 69 deletions.
7 changes: 2 additions & 5 deletions pkg/features/volcano_features.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,6 @@ const (

// ResourceTopology supports resources like cpu/memory topology aware.
ResourceTopology featuregate.Feature = "ResourceTopology"

SidecarContainers featuregate.Feature = "SidecarContainers"
)

func init() {
Expand All @@ -58,7 +56,6 @@ var defaultVolcanoFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec
QueueCommandSync: {Default: true, PreRelease: featuregate.Alpha},
PriorityClass: {Default: true, PreRelease: featuregate.Alpha},
// CSIStorage is explicitly set to false by default.
CSIStorage: {Default: false, PreRelease: featuregate.Alpha},
ResourceTopology: {Default: true, PreRelease: featuregate.Alpha},
SidecarContainers: {Default: false, PreRelease: featuregate.Alpha},
CSIStorage: {Default: false, PreRelease: featuregate.Alpha},
ResourceTopology: {Default: true, PreRelease: featuregate.Alpha},
}
78 changes: 46 additions & 32 deletions pkg/scheduler/api/job_info.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,10 +119,11 @@ type TaskInfo struct {
// LastTransaction holds the context of last scheduling transaction
LastTransaction *TransactionContext

Priority int32
VolumeReady bool
Preemptable bool
BestEffort bool
Priority int32
VolumeReady bool
Preemptable bool
BestEffort bool
HasRestartableInitContainer bool

// RevocableZone supports setting volcano.sh/revocable-zone annotation or label for pod/podgroup
// we only support empty value or * value for this version and we will support specify revocable zone name for future releases
Expand Down Expand Up @@ -176,23 +177,25 @@ func NewTaskInfo(pod *v1.Pod) *TaskInfo {
revocableZone := GetPodRevocableZone(pod)
topologyInfo := GetPodTopologyInfo(pod)
role := getTaskRole(pod)
hasRestartableInitContainer := hasRestartableInitContainer(pod)

jobID := getJobID(pod)

ti := &TaskInfo{
UID: TaskID(pod.UID),
Job: jobID,
Name: pod.Name,
Namespace: pod.Namespace,
TaskRole: role,
Priority: 1,
Pod: pod,
Resreq: resReq,
InitResreq: initResReq,
Preemptable: preemptable,
BestEffort: bestEffort,
RevocableZone: revocableZone,
NumaInfo: topologyInfo,
UID: TaskID(pod.UID),
Job: jobID,
Name: pod.Name,
Namespace: pod.Namespace,
TaskRole: role,
Priority: 1,
Pod: pod,
Resreq: resReq,
InitResreq: initResReq,
Preemptable: preemptable,
BestEffort: bestEffort,
HasRestartableInitContainer: hasRestartableInitContainer,
RevocableZone: revocableZone,
NumaInfo: topologyInfo,
TransactionContext: TransactionContext{
NodeName: pod.Spec.NodeName,
Status: getTaskStatus(pod),
Expand Down Expand Up @@ -254,21 +257,22 @@ func (ti *TaskInfo) UnsetPodResourceDecision() {
// Clone is used for cloning a task
func (ti *TaskInfo) Clone() *TaskInfo {
return &TaskInfo{
UID: ti.UID,
Job: ti.Job,
Name: ti.Name,
Namespace: ti.Namespace,
TaskRole: ti.TaskRole,
Priority: ti.Priority,
PodVolumes: ti.PodVolumes,
Pod: ti.Pod,
Resreq: ti.Resreq.Clone(),
InitResreq: ti.InitResreq.Clone(),
VolumeReady: ti.VolumeReady,
Preemptable: ti.Preemptable,
BestEffort: ti.BestEffort,
RevocableZone: ti.RevocableZone,
NumaInfo: ti.NumaInfo.Clone(),
UID: ti.UID,
Job: ti.Job,
Name: ti.Name,
Namespace: ti.Namespace,
TaskRole: ti.TaskRole,
Priority: ti.Priority,
PodVolumes: ti.PodVolumes,
Pod: ti.Pod,
Resreq: ti.Resreq.Clone(),
InitResreq: ti.InitResreq.Clone(),
VolumeReady: ti.VolumeReady,
Preemptable: ti.Preemptable,
BestEffort: ti.BestEffort,
HasRestartableInitContainer: ti.HasRestartableInitContainer,
RevocableZone: ti.RevocableZone,
NumaInfo: ti.NumaInfo.Clone(),
TransactionContext: TransactionContext{
NodeName: ti.NodeName,
Status: ti.Status,
Expand All @@ -277,6 +281,16 @@ func (ti *TaskInfo) Clone() *TaskInfo {
}
}

// hasRestartableInitContainer reports whether any of the pod's init
// containers is restartable, i.e. declares RestartPolicy "Always"
// (the sidecar-container pattern).
func hasRestartableInitContainer(pod *v1.Pod) bool {
	for i := range pod.Spec.InitContainers {
		policy := pod.Spec.InitContainers[i].RestartPolicy
		if policy != nil && *policy == v1.ContainerRestartPolicyAlways {
			return true
		}
	}
	return false
}

// String returns the taskInfo details in a string
func (ti TaskInfo) String() string {
res := fmt.Sprintf("Task (%v:%v/%v): taskSpec %s, job %v, status %v, pri %v, "+
Expand Down
46 changes: 14 additions & 32 deletions pkg/scheduler/api/pod_info.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import (
"volcano.sh/apis/pkg/apis/scheduling/v1beta1"
)

// Refer k8s.io/kubernetes/pkg/scheduler/algorithm/predicates/predicates.go#GetResourceRequest.
// Refer k8s.io/kubernetes/pkg/api/v1/resource/helpers.go#PodRequests.
//
// GetResourceRequest returns a *Resource that covers the largest width in each resource dimension.
// Because init-containers run sequentially, we collect the max in each dimension iteratively.
Expand Down Expand Up @@ -57,30 +57,30 @@ import (

// GetPodResourceRequest returns all the resource required for that pod
func GetPodResourceRequest(pod *v1.Pod) *Resource {
req := v1.ResourceList{}
restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{}

result := GetPodResourceWithoutInitContainers(pod)

restartableInitContainerReqs := EmptyResource()
initContainerReqs := EmptyResource()
for _, container := range pod.Spec.InitContainers {
containerReq := container.Resources.Requests
containerReq := NewResource(container.Resources.Requests)

if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
// and add them to the resulting cumulative container requests
addResourceList(req, containerReq)
// Add the restartable container's req to the resulting cumulative container requests.
result.Add(containerReq)

// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReq)
// Track our cumulative restartable init container resources
restartableInitContainerReqs.Add(containerReq)
containerReq = restartableInitContainerReqs
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerReq)
addResourceList(tmp, restartableInitContainerReqs)
tmp := EmptyResource()
tmp.Add(containerReq)
tmp.Add(restartableInitContainerReqs)
containerReq = tmp
}
maxResourceList(initContainerReqs, containerReq)
initContainerReqs.SetMaxResource(containerReq)
}

result.SetMaxResource(initContainerReqs)
result.AddScalar(v1.ResourcePods, 1)

return result
Expand Down Expand Up @@ -172,21 +172,3 @@ func GetPodResourceWithoutInitContainers(pod *v1.Pod) *Resource {

return result
}
// addResourceList accumulates every quantity in newList into list,
// mutating list in place. A resource name not yet present in list is
// inserted as a deep copy so later additions cannot alias newList.
func addResourceList(list, newList v1.ResourceList) {
	for name, quantity := range newList {
		value, ok := list[name]
		if !ok {
			list[name] = quantity.DeepCopy()
			continue
		}
		value.Add(quantity)
		list[name] = value
	}
}

// maxResourceList raises each entry of list to at least the matching
// quantity in newList, mutating list in place. A name that is missing
// from list, or whose newList quantity compares greater, is stored as
// a deep copy of the newList value.
func maxResourceList(list, newList v1.ResourceList) {
	for name, quantity := range newList {
		current, ok := list[name]
		if ok && quantity.Cmp(current) <= 0 {
			continue
		}
		list[name] = quantity.DeepCopy()
	}
}
Loading

0 comments on commit 16cb70c

Please sign in to comment.