1 parent 9170576 commit d550559
invokeai/backend/quantization/gguf/utils.py
@@ -5,7 +5,8 @@
 import gguf
 import torch
 
-TORCH_COMPATIBLE_QTYPES = {None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16}
+# should not be a Set until this is resolved: https://github.com/pytorch/pytorch/issues/145761
+TORCH_COMPATIBLE_QTYPES = [None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]
 
 # K Quants #
 QK_K = 256
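
For context, a constant like this is typically consumed as a simple membership test when deciding whether a GGUF tensor type can be represented directly as a torch tensor or needs dequantization first. The sketch below is a minimal illustration of that usage, not the module's actual code beyond the constant shown in the hunk; the is_torch_compatible helper is a hypothetical name introduced here for illustration.

import gguf

# Quantization types that map directly onto native torch dtypes.
# Kept as a list rather than a set per the linked PyTorch issue
# (https://github.com/pytorch/pytorch/issues/145761).
TORCH_COMPATIBLE_QTYPES = [None, gguf.GGMLQuantizationType.F32, gguf.GGMLQuantizationType.F16]

def is_torch_compatible(qtype) -> bool:
    # Hypothetical helper: True if a tensor with this GGML quantization type
    # can be loaded as-is; any other type would need a dequantization step.
    return qtype in TORCH_COMPATIBLE_QTYPES

Only `in` membership is ever needed here, so switching from a set to a three-element list costs nothing in practice while sidestepping the set-related issue referenced in the comment.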