From a1a9c6884c5cfda4c972f4087ad4d4b9c3da6518 Mon Sep 17 00:00:00 2001
From: Henry
Date: Fri, 14 Jan 2022 21:11:06 +0100
Subject: [PATCH] Fixing bug multi-gpu training (#6299)

* Fixing bug multi-gpu training

This solves this issue:
https://github.com/ultralytics/yolov5/issues/6297#issue-1103853348

* Update torch_utils.py for pep8
---
 utils/torch_utils.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/utils/torch_utils.py b/utils/torch_utils.py
index 060768e8251b..451bcdd29b7c 100644
--- a/utils/torch_utils.py
+++ b/utils/torch_utils.py
@@ -62,7 +62,8 @@ def select_device(device='', batch_size=0, newline=True):
         os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
     elif device:  # non-cpu device requested
         assert torch.cuda.is_available(), 'CUDA unavailable'  # check CUDA is available
-        assert torch.cuda.device_count() > int(device), f'invalid CUDA device {device} requested'  # check index
+        device_list = [int(val) for val in device.split(',') if val]  # split on ',' so multi-digit indices (e.g. '10') survive
+        assert all(torch.cuda.device_count() > element for element in device_list), f'invalid CUDA device {device} requested'  # check index
         os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable (must be after asserts)
 
     cuda = not cpu and torch.cuda.is_available()