Commit 30939e2

Wojtek Kowaluk authored and committed

add mps support on apple silicon

1 parent 7d97da1 commit 30939e2

File tree: 2 files changed (+12 -1 lines)

modules/models.py (+8 -1)
@@ -46,6 +46,13 @@ def load_model(model_name):
     if not any([shared.args.cpu, shared.args.load_in_8bit, shared.args.gptq_bits, shared.args.auto_devices, shared.args.disk, shared.args.gpu_memory is not None, shared.args.cpu_memory is not None, shared.args.deepspeed, shared.args.flexgen, shared.is_RWKV]):
         if any(size in shared.model_name.lower() for size in ('13b', '20b', '30b')):
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), device_map='auto', load_in_8bit=True)
+        if torch.has_mps:
+            model = AutoModelForCausalLM.from_pretrained(
+                Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True,
+                torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16
+            )
+            device = torch.device('mps')
+            model = model.to(device)
         else:
             model = AutoModelForCausalLM.from_pretrained(Path(f"models/{shared.model_name}"), low_cpu_mem_usage=True, torch_dtype=torch.bfloat16 if shared.args.bf16 else torch.float16).cuda()
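
For context, the added branch follows the usual PyTorch pattern for Apple-silicon GPUs: load the model on the CPU first, then move it to the mps device. A minimal standalone sketch of the same pattern (the model name below is a placeholder, not part of this commit; torch.backends.mps.is_available() is the availability check that newer PyTorch releases recommend over torch.has_mps):

import torch
from transformers import AutoModelForCausalLM

# Load weights on the CPU first (low_cpu_mem_usage keeps peak RAM down),
# then move the model to Apple's Metal backend if one is present.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # placeholder model name, not from the commit
    low_cpu_mem_usage=True,
    torch_dtype=torch.float16,
)
if torch.backends.mps.is_available():
    model = model.to(torch.device("mps"))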

@@ -97,7 +104,7 @@ def load_model(model_name):
     # Custom
     else:
         params = {"low_cpu_mem_usage": True}
-        if not shared.args.cpu and not torch.cuda.is_available():
+        if not shared.args.cpu and not torch.cuda.is_available() and not torch.has_mps:
             print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
             shared.args.cpu = True
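
The second hunk widens the CPU-fallback check so that a machine with an MPS device is no longer warned about a missing GPU. The underlying device-priority idea, written as a plain standalone helper (an illustrative sketch assuming CUDA is preferred over MPS over CPU; best_device is a hypothetical name, not from the repository):

import torch

def best_device() -> torch.device:
    # Prefer CUDA, then Apple's MPS backend, then fall back to the CPU.
    if torch.cuda.is_available():
        return torch.device("cuda")
    if torch.backends.mps.is_available():
        return torch.device("mps")
    return torch.device("cpu")

print(best_device())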

modules/text_generation.py (+4)
@@ -33,9 +33,13 @@ def encode(prompt, tokens_to_generate=0, add_special_tokens=True):
         return input_ids.numpy()
     elif shared.args.deepspeed:
         return input_ids.to(device=local_rank)
+    elif torch.has_mps:
+        device = torch.device('mps')
+        return input_ids.to(device)
     else:
         return input_ids.cuda()
 
+
 def decode(output_ids):
     # Open Assistant relies on special tokens like <|endoftext|>
     if re.match('(oasst|galactica)-*', shared.model_name.lower()):
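
The encode() change mirrors the loader: the token ids produced by the tokenizer must be moved to the same device the model lives on before generation. A rough standalone sketch of that routing (the tokenizer name and prompt are illustrative placeholders, not from the commit):

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")  # placeholder
input_ids = tokenizer.encode("Hello there", return_tensors="pt")

# Send the ids to whichever accelerator the model uses, else stay on CPU.
if torch.backends.mps.is_available():
    input_ids = input_ids.to(torch.device("mps"))
elif torch.cuda.is_available():
    input_ids = input_ids.cuda()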
