Add support for vGPU
haijianyang committed Nov 6, 2023
1 parent b1f986d commit c34d6dc
Showing 16 changed files with 988 additions and 207 deletions.
10 changes: 9 additions & 1 deletion api/v1beta1/elfmachine_types.go
@@ -318,8 +318,16 @@ func (m *ElfMachine) GetVMDisconnectionTimestamp() *metav1.Time {
return nil
}

func (m *ElfMachine) RequiresGPUOrVGPUDevices() bool {
return m.RequiresGPUDevices() || m.RequiresVGPUDevices()
}

func (m *ElfMachine) RequiresGPUDevices() bool {
return len(m.Spec.GPUDevices) > 0 || len(m.Spec.VGPUDevices) > 0
return len(m.Spec.GPUDevices) > 0
}

func (m *ElfMachine) RequiresVGPUDevices() bool {
return len(m.Spec.VGPUDevices) > 0
}

//+kubebuilder:object:root=true
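The commit splits the old combined check into three predicates: `RequiresGPUDevices` now covers only passthrough GPUs, `RequiresVGPUDevices` covers vGPUs, and `RequiresGPUOrVGPUDevices` is the union used wherever the controller previously called `RequiresGPUDevices`. A minimal sketch of the resulting behavior, with the spec types simplified to stand-ins (the real fields live under `m.Spec`, and the `V100-4Q` profile string is purely illustrative):

```go
package main

import "fmt"

// Simplified stand-ins for the real v1beta1 spec types.
type GPUPassthroughDeviceSpec struct {
	Model string
	Count int32
}

type VGPUDeviceSpec struct {
	Type  string
	Count int32
}

type ElfMachine struct {
	GPUDevices  []GPUPassthroughDeviceSpec
	VGPUDevices []VGPUDeviceSpec
}

// Mirrors the split introduced by this commit: passthrough GPUs and
// vGPUs are distinct requirements, with a combined helper for code
// paths that only care whether any GPU handling is needed.
func (m *ElfMachine) RequiresGPUDevices() bool  { return len(m.GPUDevices) > 0 }
func (m *ElfMachine) RequiresVGPUDevices() bool { return len(m.VGPUDevices) > 0 }
func (m *ElfMachine) RequiresGPUOrVGPUDevices() bool {
	return m.RequiresGPUDevices() || m.RequiresVGPUDevices()
}

func main() {
	m := &ElfMachine{VGPUDevices: []VGPUDeviceSpec{{Type: "V100-4Q", Count: 2}}}
	fmt.Println(m.RequiresGPUDevices())       // false
	fmt.Println(m.RequiresVGPUDevices())      // true
	fmt.Println(m.RequiresGPUOrVGPUDevices()) // true
}
```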
16 changes: 8 additions & 8 deletions controllers/elfmachine_controller.go
@@ -308,7 +308,7 @@ func (r *ElfMachineReconciler) reconcileDelete(ctx *context.MachineContext) (rec
// locked by the virtual machine may not be unlocked.
// For example, the Cluster or ElfMachine was deleted during a pause.
if !ctrlutil.ContainsFinalizer(ctx.ElfMachine, infrav1.MachineFinalizer) &&
ctx.ElfMachine.RequiresGPUDevices() {
ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
}
}()
@@ -532,23 +532,23 @@ func (r *ElfMachineReconciler) reconcileVM(ctx *context.MachineContext) (*models
}

var hostID *string
var gpuDevices []*models.GpuDevice
var gpuDeviceInfos []*service.GPUDeviceInfo
// Control plane virtual machines do not support GPU devices.
if machineutil.IsControlPlaneMachine(ctx.Machine) {
hostID, err = r.preCheckPlacementGroup(ctx)
if err != nil || hostID == nil {
return nil, false, err
}
} else {
hostID, gpuDevices, err = r.selectHostAndGPUsForVM(ctx, "")
hostID, gpuDeviceInfos, err = r.selectHostAndGPUsForVM(ctx, "")
if err != nil || hostID == nil {
return nil, false, err
}
}

ctx.Logger.Info("Create VM for ElfMachine")

withTaskVM, err := ctx.VMService.Clone(ctx.ElfCluster, ctx.ElfMachine, bootstrapData, *hostID, gpuDevices)
withTaskVM, err := ctx.VMService.Clone(ctx.ElfCluster, ctx.ElfMachine, bootstrapData, *hostID, gpuDeviceInfos)
if err != nil {
releaseTicketForCreateVM(ctx.ElfMachine.Name)

@@ -561,7 +561,7 @@ func (r *ElfMachineReconciler) reconcileVM(ctx *context.MachineContext) (*models
ctx.ElfMachine.SetVM(util.GetVMRef(vm))
} else {
// Duplicate VM error does not require unlocking GPU devices.
if ctx.ElfMachine.RequiresGPUDevices() {
if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
}

@@ -907,11 +907,11 @@ func (r *ElfMachineReconciler) reconcileVMTask(ctx *context.MachineContext, vm *
setVMDuplicate(ctx.ElfMachine.Name)
}

if ctx.ElfMachine.RequiresGPUDevices() {
if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
}
case service.IsPowerOnVMTask(task) || service.IsUpdateVMTask(task):
if ctx.ElfMachine.RequiresGPUDevices() {
if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {

unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
}
case service.IsMemoryInsufficientError(errorMessage):
@@ -933,7 +933,7 @@ func (r *ElfMachineReconciler) reconcileVMTask(ctx *context.MachineContext, vm *
ctx.Logger.Info("VM task succeeded", "vmRef", vmRef, "taskRef", taskRef, "taskDescription", service.GetTowerString(task.Description))

if service.IsCloneVMTask(task) || service.IsUpdateVMTask(task) {
if ctx.ElfMachine.RequiresGPUDevices() {
if ctx.ElfMachine.RequiresGPUOrVGPUDevices() {

unlockGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name)
}
}
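Every failure path above funnels into `unlockGPUDevicesLockedByVM`, so a VM whose clone, power-on, or update task fails cannot keep GPU devices reserved. The lock helpers themselves are defined elsewhere in the repository; the sketch below is a hypothetical, simplified version of that bookkeeping (an in-process table keyed by cluster and VM name) to illustrate the pattern, not the actual implementation:

```go
package main

import (
	"fmt"
	"sync"
)

// vmGPULock records the host and GPU devices a pending VM has reserved.
type vmGPULock struct {
	HostID string
	GPUIDs []string
}

// Hypothetical in-process lock table; the real helpers keep similar
// state so concurrent reconciles cannot hand one GPU to two VMs.
var gpuLocks sync.Map // key: clusterID + "/" + vmName

func lockGPUDevicesForVM(clusterID, vmName, hostID string, gpuIDs []string) bool {
	_, loaded := gpuLocks.LoadOrStore(clusterID+"/"+vmName, vmGPULock{HostID: hostID, GPUIDs: gpuIDs})
	return !loaded // false means a reservation already exists under this key
}

func unlockGPUDevicesLockedByVM(clusterID, vmName string) {
	gpuLocks.Delete(clusterID + "/" + vmName)
}

func main() {
	fmt.Println(lockGPUDevicesForVM("cluster-a", "vm-1", "host-1", []string{"gpu-1"})) // true
	// A failed clone/power-on/update task releases the reservation so
	// the devices can be offered to other machines.
	unlockGPUDevicesLockedByVM("cluster-a", "vm-1")
	fmt.Println(lockGPUDevicesForVM("cluster-a", "vm-1", "host-1", []string{"gpu-1"})) // true again
}
```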
209 changes: 136 additions & 73 deletions controllers/elfmachine_controller_gpu.go
@@ -43,8 +43,8 @@ import (
// 3. A non-empty string indicates that the specified host ID was returned.
//
// The returned gpudevices: the GPU devices for the virtual machine.
func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContext, preferredHostID string) (rethost *string, gpudevices []*models.GpuDevice, reterr error) {
if !ctx.ElfMachine.RequiresGPUDevices() {
func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContext, preferredHostID string) (rethost *string, gpudevices []*service.GPUDeviceInfo, reterr error) {
if !ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
return pointer.String(""), nil, nil
}

@@ -58,12 +58,12 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex

// If the GPU devices locked by the virtual machine still exist, use them directly.
if lockedVMGPUs := getGPUDevicesLockedByVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name); lockedVMGPUs != nil {
if ok, gpuDevices, err := r.checkGPUsCanBeUsedForVM(ctx, lockedVMGPUs.GPUDeviceIDs, ctx.ElfMachine.Name); err != nil {
if ok, err := r.checkGPUsCanBeUsedForVM(ctx, lockedVMGPUs.GetGPUIDs()); err != nil {
return nil, nil, err
} else if ok {
ctx.Logger.V(1).Info("Found locked VM GPU devices, so skip allocation", "lockedVMGPUs", lockedVMGPUs)

return &lockedVMGPUs.HostID, gpuDevices, nil
return &lockedVMGPUs.HostID, lockedVMGPUs.GetGPUDeviceInfos(), nil
}

// If the GPU devices returned by Tower are inconsistent with the locked GPUs,
@@ -84,30 +84,46 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex
}

// Get all GPU devices of available hosts.
gpuDevices, err := ctx.VMService.FindGPUDevicesByHostIDs(availableHosts.IDs())
gpuDeviceUsage := models.GpuDeviceUsagePASSTHROUGH
if ctx.ElfMachine.RequiresVGPUDevices() {
gpuDeviceUsage = models.GpuDeviceUsageVGPU
}

gpuDevices, err := ctx.VMService.FindGPUDevicesByHostIDs(availableHosts.IDs(), gpuDeviceUsage)
if err != nil || len(gpuDevices) == 0 {
return nil, nil, err
}

gpuDeviceIDs := make([]string, len(gpuDevices))
for i := 0; i < len(gpuDevices); i++ {
gpuDeviceIDs[i] = *gpuDevices[i].ID
}
// Get GPU devices with VMs and allocation details.
gpuDeviceInfos, err := ctx.VMService.FindGPUDeviceInfos(gpuDeviceIDs)
if err != nil {
return nil, nil, err
}

lockedClusterGPUIDs := getLockedClusterGPUIDs(ctx.ElfCluster.Spec.Cluster)
service.AggregateUnusedGPUDevicesToGPUDeviceInfos(gpuDeviceInfos, gpuDevices)

// Group GPU devices by host.
hostGPUDeviceMap := make(map[string][]*models.GpuDevice)
hostIDSet := sets.NewString()
for i := 0; i < len(gpuDevices); i++ {
// Filter already used or locked GPU devices.
if !service.GPUCanBeUsedForVM(gpuDevices[i], ctx.ElfMachine.Name) ||
lockedClusterGPUIDs.Has(*gpuDevices[i].ID) {
continue
}
// Filter already used GPU devices.
gpuDeviceInfos = gpuDeviceInfos.Filter(func(g *service.GPUDeviceInfo) bool {
return g.AvailableCount > 0
})

hostIDSet.Insert(*gpuDevices[i].Host.ID)
if gpus, ok := hostGPUDeviceMap[*gpuDevices[i].Host.ID]; !ok {
hostGPUDeviceMap[*gpuDevices[i].Host.ID] = []*models.GpuDevice{gpuDevices[i]}
// Filter locked GPU devices.
gpuDeviceInfos = filterGPUDeviceInfosByLockGPUDevices(ctx.ElfCluster.Spec.Cluster, gpuDeviceInfos)

// Group GPU deviceInfos by host.
hostGPUDeviceInfoMap := make(map[string]service.GPUDeviceInfos)
hostIDSet := sets.NewString()
gpuDeviceInfos.Iterate(func(gpuDeviceInfo *service.GPUDeviceInfo) {
hostIDSet.Insert(gpuDeviceInfo.HostID)
if gpuInfos, ok := hostGPUDeviceInfoMap[gpuDeviceInfo.HostID]; !ok {
hostGPUDeviceInfoMap[gpuDeviceInfo.HostID] = service.NewGPUDeviceInfos(gpuDeviceInfo)
} else {
hostGPUDeviceMap[*gpuDevices[i].Host.ID] = append(gpus, gpuDevices[i])
gpuInfos.Insert(gpuDeviceInfo)

}
}
})

// Choose a host that meets ElfMachine GPU needs.
// Use a random host list to reduce the probability of the same host being selected at the same time.
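The diff never shows the definition of `service.GPUDeviceInfo`; the sketch below infers its shape from the fields this commit reads and writes (`ID`, `HostID`, `Model`, `VGPUType`, `AllocatedCount`, `AvailableCount`) and reproduces the group-by-host step above in miniature. Treat the struct as an assumption, not the real type:

```go
package main

import "fmt"

// Inferred shape of service.GPUDeviceInfo; the real struct may differ.
type GPUDeviceInfo struct {
	ID             string
	HostID         string
	Model          string // passthrough GPU model
	VGPUType       string // vGPU profile name
	AllocatedCount int32  // instances this VM would take
	AvailableCount int32  // instances still free on the device
}

// groupByHost lets each candidate host be checked independently
// against the machine's GPU requirements, as the hunk above does.
func groupByHost(infos []*GPUDeviceInfo) map[string][]*GPUDeviceInfo {
	byHost := make(map[string][]*GPUDeviceInfo)
	for _, info := range infos {
		byHost[info.HostID] = append(byHost[info.HostID], info)
	}
	return byHost
}

func main() {
	infos := []*GPUDeviceInfo{
		{ID: "gpu-1", HostID: "host-a", VGPUType: "grid-v100-4q", AvailableCount: 2},
		{ID: "gpu-2", HostID: "host-b", VGPUType: "grid-v100-4q", AvailableCount: 4},
	}
	for host, gpus := range groupByHost(infos) {
		fmt.Printf("%s: %d candidate device(s)\n", host, len(gpus))
	}
}
```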
@@ -122,25 +138,29 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex
}

for i := 0; i < len(unsortedHostIDs); i++ {
if hostGPUDevices, ok := hostGPUDeviceMap[unsortedHostIDs[i]]; ok {
selectedGPUDevices := selectGPUDevicesForVM(hostGPUDevices, ctx.ElfMachine.Spec.GPUDevices)
if len(selectedGPUDevices) > 0 {
gpuDeviceIDs := make([]string, len(selectedGPUDevices))
for i := 0; i < len(selectedGPUDevices); i++ {
gpuDeviceIDs[i] = *selectedGPUDevices[i].ID
}

// Lock the selected GPU devices to prevent them from being allocated to multiple virtual machines.
if !lockGPUDevicesForVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name, unsortedHostIDs[i], gpuDeviceIDs) {
// Lock failure indicates that the GPU devices are locked by another virtual machine.
// Just trying other hosts.
continue
}

ctx.Logger.Info("Selected host and GPU devices for VM", "hostId", unsortedHostIDs[i], "gpuDeviceIds", gpuDeviceIDs)

return &unsortedHostIDs[i], selectedGPUDevices, nil
hostGPUDeviceInfos, ok := hostGPUDeviceInfoMap[unsortedHostIDs[i]]
if !ok {
continue

}

var selectedGPUDeviceInfos []*service.GPUDeviceInfo
if ctx.ElfMachine.RequiresGPUDevices() {
selectedGPUDeviceInfos = selectGPUDevicesForVM(hostGPUDeviceInfos, ctx.ElfMachine.Spec.GPUDevices)
} else {
selectedGPUDeviceInfos = selectVGPUDevicesForVM(hostGPUDeviceInfos, ctx.ElfMachine.Spec.VGPUDevices)
}

if len(selectedGPUDeviceInfos) > 0 {
// Lock the selected GPU devices to prevent them from being allocated to multiple virtual machines.
if !lockGPUDevicesForVM(ctx.ElfCluster.Spec.Cluster, ctx.ElfMachine.Name, unsortedHostIDs[i], selectedGPUDeviceInfos) {
// Lock failure indicates that the GPU devices are locked by another virtual machine.
// Just trying other hosts.
continue

}

ctx.Logger.Info("Selected host and GPU devices for VM", "hostId", unsortedHostIDs[i], "gpuDevices", selectedGPUDeviceInfos)

return &unsortedHostIDs[i], selectedGPUDeviceInfos, nil
}
}

@@ -149,38 +169,86 @@ func (r *ElfMachineReconciler) selectHostAndGPUsForVM(ctx *context.MachineContex

// selectGPUDevicesForVM selects the GPU devices required by the virtual machine from the host's GPU devices.
// Empty GPU devices indicates that the host's GPU devices cannot meet the GPU requirements of the virtual machine.
func selectGPUDevicesForVM(hostGPUDevices []*models.GpuDevice, requiredGPUDevices []infrav1.GPUPassthroughDeviceSpec) []*models.GpuDevice {
func selectGPUDevicesForVM(hostGPUDeviceInfos service.GPUDeviceInfos, requiredGPUDevices []infrav1.GPUPassthroughDeviceSpec) []*service.GPUDeviceInfo {
// Group GPU devices by model.
modelGPUDeviceMap := make(map[string][]*models.GpuDevice)
for i := 0; i < len(hostGPUDevices); i++ {
if gpus, ok := modelGPUDeviceMap[*hostGPUDevices[i].Model]; !ok {
modelGPUDeviceMap[*hostGPUDevices[i].Model] = []*models.GpuDevice{hostGPUDevices[i]}
modelGPUDeviceMap := make(map[string][]*service.GPUDeviceInfo)
hostGPUDeviceInfos.Iterate(func(gpuDeviceInfo *service.GPUDeviceInfo) {
if gpuInfos, ok := modelGPUDeviceMap[gpuDeviceInfo.Model]; !ok {
modelGPUDeviceMap[gpuDeviceInfo.Model] = []*service.GPUDeviceInfo{gpuDeviceInfo}
} else {
modelGPUDeviceMap[*hostGPUDevices[i].Model] = append(gpus, hostGPUDevices[i])
modelGPUDeviceMap[gpuDeviceInfo.Model] = append(gpuInfos, gpuDeviceInfo)

}
}
})

var selectedGPUDevices []*models.GpuDevice
var selectedGPUDeviceInfos []*service.GPUDeviceInfo
for i := 0; i < len(requiredGPUDevices); i++ {
if gpus, ok := modelGPUDeviceMap[requiredGPUDevices[i].Model]; !ok {
gpuDevices, ok := modelGPUDeviceMap[requiredGPUDevices[i].Model]
if !ok || len(gpuDevices) < int(requiredGPUDevices[i].Count) {
return nil
}

gpuInfos := gpuDevices[:int(requiredGPUDevices[i].Count)]
for j := 0; j < len(gpuInfos); j++ {
selectedGPUDeviceInfos = append(selectedGPUDeviceInfos, &service.GPUDeviceInfo{ID: gpuInfos[j].ID, AllocatedCount: 1, AvailableCount: 1})
}
}

return selectedGPUDeviceInfos
}
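Condensed, the passthrough path above groups a host's devices by model and fails fast if any requested model has fewer free devices than `Count`; each selected device contributes exactly one instance (`AllocatedCount: 1`). A hedged re-statement, reusing the inferred `GPUDeviceInfo` type from the earlier sketch:

```go
// selectByModel mirrors selectGPUDevicesForVM in miniature; nil means
// "this host cannot satisfy the request" and the caller tries the next host.
func selectByModel(hostGPUs []*GPUDeviceInfo, required map[string]int) []*GPUDeviceInfo {
	byModel := make(map[string][]*GPUDeviceInfo)
	for _, g := range hostGPUs {
		byModel[g.Model] = append(byModel[g.Model], g)
	}

	var selected []*GPUDeviceInfo
	for model, count := range required {
		gpus := byModel[model]
		if len(gpus) < count {
			return nil // not enough devices of this model on the host
		}
		for _, g := range gpus[:count] {
			// Passthrough always allocates a whole device.
			selected = append(selected, &GPUDeviceInfo{ID: g.ID, AllocatedCount: 1, AvailableCount: 1})
		}
	}
	return selected
}
```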

// selectVGPUDevicesForVM selects the vGPU devices required by the virtual machine from the host's vGPU devices.
// Empty vGPU devices indicates that the host's vGPU devices cannot meet the vGPU requirements of the virtual machine.
func selectVGPUDevicesForVM(hostGPUDeviceInfos service.GPUDeviceInfos, requiredVGPUDevices []infrav1.VGPUDeviceSpec) []*service.GPUDeviceInfo {
// Group vGPU devices by vGPU type.
typeVGPUDeviceInfoMap := make(map[string][]*service.GPUDeviceInfo)
hostGPUDeviceInfos.Iterate(func(gpuDeviceInfo *service.GPUDeviceInfo) {
if gpuInfos, ok := typeVGPUDeviceInfoMap[gpuDeviceInfo.VGPUType]; !ok {
typeVGPUDeviceInfoMap[gpuDeviceInfo.VGPUType] = []*service.GPUDeviceInfo{gpuDeviceInfo}
} else {
if len(gpus) < int(requiredGPUDevices[i].Count) {
return nil
typeVGPUDeviceInfoMap[gpuDeviceInfo.VGPUType] = append(gpuInfos, gpuDeviceInfo)
}
})

var selectedGPUDeviceInfos []*service.GPUDeviceInfo
for i := 0; i < len(requiredVGPUDevices); i++ {
gpuDeviceInfos, ok := typeVGPUDeviceInfoMap[requiredVGPUDevices[i].Type]
if !ok {
return nil
}

var gpuInfos []*service.GPUDeviceInfo
requiredCount := requiredVGPUDevices[i].Count
for j := 0; j < len(gpuDeviceInfos); j++ {
if gpuDeviceInfos[j].AvailableCount <= 0 {
continue
}

if gpuDeviceInfos[j].AvailableCount >= requiredCount {
gpuInfos = append(gpuInfos, &service.GPUDeviceInfo{ID: gpuDeviceInfos[j].ID, AllocatedCount: requiredCount, AvailableCount: gpuDeviceInfos[j].AvailableCount})
requiredCount = 0

break
} else {
gpuInfos = append(gpuInfos, &service.GPUDeviceInfo{ID: gpuDeviceInfos[j].ID, AllocatedCount: gpuDeviceInfos[j].AvailableCount, AvailableCount: gpuDeviceInfos[j].AvailableCount})
requiredCount -= gpuDeviceInfos[j].AvailableCount
}
}

selectedGPUDevices = append(selectedGPUDevices, gpus[:int(requiredGPUDevices[i].Count)]...)
// Remove selected GPU devices.
modelGPUDeviceMap[requiredGPUDevices[i].Model] = gpus[int(requiredGPUDevices[i].Count):]
// If requiredCount is greater than 0, it means there are not enough vGPUs,
// just return directly.
if requiredCount > 0 {
return nil
}

selectedGPUDeviceInfos = append(selectedGPUDeviceInfos, gpuInfos...)
}

return selectedGPUDevices
return selectedGPUDeviceInfos
}
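The vGPU path differs from passthrough in that one physical device can serve multiple instances, so the allocation is greedy across `AvailableCount`. A standalone sketch of that loop, again using the inferred `GPUDeviceInfo` type:

```go
// allocateVGPUs spreads a request for count instances of one vGPU
// profile across devices, taking as much as each device can give.
// nil means the host cannot cover the full request.
func allocateVGPUs(devices []*GPUDeviceInfo, vgpuType string, count int32) []*GPUDeviceInfo {
	var picked []*GPUDeviceInfo
	remaining := count
	for _, d := range devices {
		if d.VGPUType != vgpuType || d.AvailableCount <= 0 {
			continue
		}
		take := d.AvailableCount
		if take > remaining {
			take = remaining
		}
		picked = append(picked, &GPUDeviceInfo{ID: d.ID, AllocatedCount: take, AvailableCount: d.AvailableCount})
		remaining -= take
		if remaining == 0 {
			return picked
		}
	}
	return nil // not enough free vGPU instances of this profile
}
```

For example, a request for 6 instances against devices with `AvailableCount` 4 and 2 yields `AllocatedCount` 4 on the first device and 2 on the second; with only 4 and 1 available, the function returns nil and the reconciler moves on to the next host.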

// reconcileGPUDevices ensures that the virtual machine has the expected GPU devices.
func (r *ElfMachineReconciler) reconcileGPUDevices(ctx *context.MachineContext, vm *models.VM) (bool, error) {
if !ctx.ElfMachine.RequiresGPUDevices() {
if !ctx.ElfMachine.RequiresGPUOrVGPUDevices() {
return true, nil
}

@@ -213,7 +281,7 @@ func (r *ElfMachineReconciler) reconcileGPUDevices(ctx *context.MachineContext,
gpuIDs[i] = *vm.GpuDevices[i].ID
}

if ok, _, err := r.checkGPUsCanBeUsedForVM(ctx, gpuIDs, ctx.ElfMachine.Name); err != nil {
if ok, err := r.checkGPUsCanBeUsedForVM(ctx, gpuIDs); err != nil {
return false, err
} else if !ok {
// If the GPU devices are already in use,
Expand All @@ -228,7 +296,7 @@ func (r *ElfMachineReconciler) reconcileGPUDevices(ctx *context.MachineContext,

// addGPUDevicesForVM adds expected GPU devices to the virtual machine.
func (r *ElfMachineReconciler) addGPUDevicesForVM(ctx *context.MachineContext, vm *models.VM) (bool, error) {
hostID, gpuDevices, err := r.selectHostAndGPUsForVM(ctx, *vm.Host.ID)
hostID, gpuDeviceInfos, err := r.selectHostAndGPUsForVM(ctx, *vm.Host.ID)
if err != nil || hostID == nil {
return false, err
}
@@ -244,15 +312,7 @@ func (r *ElfMachineReconciler) addGPUDevicesForVM(ctx *context.MachineContext, v
return ok, err
}

gpus := make([]*models.VMGpuOperationParams, len(gpuDevices))
for i := 0; i < len(gpuDevices); i++ {
gpus[i] = &models.VMGpuOperationParams{
GpuID: gpuDevices[i].ID,
Amount: service.TowerInt32(1),
}
}

task, err := ctx.VMService.AddGPUDevices(ctx.ElfMachine.Status.VMRef, gpus)
task, err := ctx.VMService.AddGPUDevices(ctx.ElfMachine.Status.VMRef, gpuDeviceInfos)
if err != nil {
conditions.MarkFalse(ctx.ElfMachine, infrav1.VMProvisionedCondition, infrav1.AttachingGPUFailedReason, clusterv1.ConditionSeverityWarning, err.Error())

@@ -298,19 +358,22 @@ func (r *ElfMachineReconciler) removeVMGPUDevices(ctx *context.MachineContext, v

// checkGPUsCanBeUsedForVM checks whether GPU devices can be used by the specified virtual machine.
// The return true means the GPU devices can be used for the virtual machine.
func (r *ElfMachineReconciler) checkGPUsCanBeUsedForVM(ctx *context.MachineContext, gpuDeviceIDs []string, vm string) (bool, []*models.GpuDevice, error) {
func (r *ElfMachineReconciler) checkGPUsCanBeUsedForVM(ctx *context.MachineContext, gpuDeviceIDs []string) (bool, error) {
gpuDevices, err := ctx.VMService.FindGPUDevicesByIDs(gpuDeviceIDs)
if err != nil {
return false, nil, err
if err != nil || len(gpuDevices) != len(gpuDeviceIDs) {
return false, err
}

if len(gpuDevices) != len(gpuDeviceIDs) {
return false, nil, nil
gpuDeviceInfos, err := ctx.VMService.FindGPUDeviceInfos(gpuDeviceIDs)
if err != nil {
return false, err

}

if len(service.FilterOutGPUsCanNotBeUsedForVM(gpuDevices, vm)) != len(gpuDeviceIDs) {
return false, nil, nil
service.AggregateUnusedGPUDevicesToGPUDeviceInfos(gpuDeviceInfos, gpuDevices)

if service.HasGPUsCanNotBeUsedForVM(gpuDeviceInfos, ctx.ElfMachine) {
return false, nil
}

return true, gpuDevices, nil
return true, nil
}
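The revised check no longer returns the device list; it only answers usability: every requested ID must still be reported by Tower, and after folding unused devices into the infos (`AggregateUnusedGPUDevicesToGPUDeviceInfos` and `HasGPUsCanNotBeUsedForVM` are only named here, so their exact semantics are assumed), none may be fully committed elsewhere. A condensed, hedged approximation, with `found` assumed to be keyed by device ID:

```go
// canUseGPUs reports whether every requested device still exists in
// Tower's view and still has free capacity, mirroring the spirit of
// the check above.
func canUseGPUs(requestedIDs []string, found map[string]*GPUDeviceInfo) bool {
	if len(found) != len(requestedIDs) {
		return false // Tower no longer reports some requested device
	}
	for _, id := range requestedIDs {
		info, ok := found[id]
		if !ok || info.AvailableCount <= 0 {
			return false // device missing or fully allocated to other VMs
		}
	}
	return true
}
```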