From 323c266cfe65f16f522092f1bc84998e04ed7f94 Mon Sep 17 00:00:00 2001
From: JackieWu
Date: Mon, 9 Jan 2023 12:46:42 +0800
Subject: [PATCH] [Bug Fixed] use torch.cuda.is_available() (#2661)

Co-authored-by: Olatunji Ruwase
---
 deepspeed/runtime/fp16/fused_optimizer.py   | 2 +-
 deepspeed/runtime/fp16/unfused_optimizer.py | 2 +-
 deepspeed/runtime/zero/stage3.py            | 2 +-
 deepspeed/runtime/zero/stage_1_and_2.py     | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/deepspeed/runtime/fp16/fused_optimizer.py b/deepspeed/runtime/fp16/fused_optimizer.py
index aeed2f4b18e1..7734d6ef0a29 100755
--- a/deepspeed/runtime/fp16/fused_optimizer.py
+++ b/deepspeed/runtime/fp16/fused_optimizer.py
@@ -41,7 +41,7 @@ def __init__(self,
         self.deepspeed = deepspeed
         self.has_moe_layers = has_moe_layers
         self.using_pipeline = self.deepspeed.pipeline_parallelism
-        if not torch.cuda.is_available:
+        if not torch.cuda.is_available():
             raise SystemError("Cannot use fp16 without CUDA.")

         self.optimizer = init_optimizer
diff --git a/deepspeed/runtime/fp16/unfused_optimizer.py b/deepspeed/runtime/fp16/unfused_optimizer.py
index 88f0df443405..3bf906404e87 100755
--- a/deepspeed/runtime/fp16/unfused_optimizer.py
+++ b/deepspeed/runtime/fp16/unfused_optimizer.py
@@ -40,7 +40,7 @@ def __init__(self,
         if dist.get_rank() == 0:
             logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ')

-        if not torch.cuda.is_available:
+        if not torch.cuda.is_available():
             raise SystemError("Cannot use fp16 without CUDA.")

         self.optimizer = init_optimizer
diff --git a/deepspeed/runtime/zero/stage3.py b/deepspeed/runtime/zero/stage3.py
index 6e5e151a995e..10edf4ba818a 100755
--- a/deepspeed/runtime/zero/stage3.py
+++ b/deepspeed/runtime/zero/stage3.py
@@ -122,7 +122,7 @@ def __init__(self,
         # - assume all params requires grad
         # - flat by groups, not keeping state. TODO: remove state explicitly?
         # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
-        if not torch.cuda.is_available:
+        if not torch.cuda.is_available():
             raise SystemError("Cannot use fp16 without CUDA.")

         self.optimizer = init_optimizer
diff --git a/deepspeed/runtime/zero/stage_1_and_2.py b/deepspeed/runtime/zero/stage_1_and_2.py
index 85bf055c6154..c2fcb15a97f8 100755
--- a/deepspeed/runtime/zero/stage_1_and_2.py
+++ b/deepspeed/runtime/zero/stage_1_and_2.py
@@ -154,7 +154,7 @@ def __init__(self,
         # - assume all params requires grad
         # - flat by groups, not keeping state. TODO: remove state explicitly?
        # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
-        if not torch.cuda.is_available:
+        if not torch.cuda.is_available():
             raise SystemError("Cannot use fp16 without CUDA.")

         self.optimizer = init_optimizer
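
Note on the fix: in Python, a function object such as torch.cuda.is_available is always truthy, so the original check `not torch.cuda.is_available` evaluates to False on every machine and the SystemError guard can never fire; only the called form `torch.cuda.is_available()` returns the actual availability flag. A minimal sketch of the difference, using a local stand-in function instead of torch so it runs anywhere:

    # Stand-in for torch.cuda.is_available on a machine without CUDA.
    def is_available():
        return False

    # Buggy form: `is_available` is the function object itself, which is always
    # truthy, so `not is_available` is always False and the guard never fires.
    print(not is_available)      # False -> SystemError would never be raised

    # Fixed form: calling the function returns the real availability flag.
    print(not is_available())    # True  -> SystemError is raised as intended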