From 30939e2aee539bd3a46573aa1ed86168b31fddf4 Mon Sep 17 00:00:00 2001
From: Wojtek Kowaluk
Date: Sat, 18 Mar 2023 00:56:23 +0100
Subject: [PATCH] add mps support on apple silicon

---
 modules/models.py          | 9 ++++++++-
 modules/text_generation.py | 4 ++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/modules/models.py b/modules/models.py
index 63060d4324..c37b058668 100644
--- a/modules/models.py
+++ b/modules/models.py
@@ -46,6 +46,13 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
+        if torch.has_mps:
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"),low_cpu_mem_usage=True,
+                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            device = torch.device('mps')
+            model = model.to(device)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
 
@@ -97,7 +104,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
 
diff --git a/modules/text_generation.py b/modules/text_generation.py
index e5b4ad9144..3a7bfa6eb6 100644
--- a/modules/text_generation.py
+++ b/modules/text_generation.py
@@ -33,9 +33,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()
 
+
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
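
For reference, a minimal standalone sketch of the device-selection pattern this patch applies (prefer CUDA, fall back to MPS on Apple Silicon, otherwise stay on CPU), moving both the model and the encoded prompt to the chosen device. It is not part of the patch; the model path, prompt, and dtype choices are illustrative assumptions. It uses torch.backends.mps.is_available() as the runtime check, whereas the patch tests the older torch.has_mps attribute.

# Sketch only: CUDA -> MPS -> CPU fallback, mirroring the idea of the patch above.
# The model directory and prompt are placeholder assumptions, not values from the repo.
from pathlib import Path

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def pick_device() -> torch.device:
    # Prefer CUDA, then Apple Silicon MPS, then CPU.
    if torch.cuda.is_available():
        return torch.device('cuda')
    if torch.backends.mps.is_available():  # the patch checks torch.has_mps instead
        return torch.device('mps')
    return torch.device('cpu')

if __name__ == '__main__':
    device = pick_device()
    model_path = Path('models/llama-7b')  # hypothetical local model directory
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        low_cpu_mem_usage=True,
        torch_dtype=torch.float16 if device.type != 'cpu' else torch.float32,
    ).to(device)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    input_ids = tokenizer('Hello', return_tensors='pt').input_ids.to(device)
    output_ids = model.generate(input_ids, max_new_tokens=20)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))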