We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 67d6247 · commit 66256ac — Copy full SHA for 66256ac
modules/models.py
@@ -98,7 +98,7 @@ def load_model(model_name):
98
command = "AutoModelForCausalLM.from_pretrained"
99
params = ["low_cpu_mem_usage=True"]
100
if not shared.args.cpu and not torch.cuda.is_available():
101
- print("Warning: no GPU has been detected.\nFalling back to CPU mode.\n")
+ print("Warning: torch.cuda.is_available() returned False.\nThis means that no GPU has been detected.\nFalling back to CPU mode.\n")
102
shared.args.cpu = True
103
104
if shared.args.cpu:
0 commit comments