
Commit ef79941

llama : disable FA if KV head sizes do not match

1 parent b473e95 commit ef79941

File tree

1 file changed: +5 -0 lines changed

llama.cpp (+5)
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
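
For context, a minimal standalone sketch of the guard this commit adds, compiled outside llama.cpp: the hparams_t struct and the 192/128 values below are hypothetical stand-ins for a model whose K and V head sizes differ; only the condition and the forcing-off behavior mirror the diff.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for model->hparams in llama.cpp.
struct hparams_t {
    uint32_t n_embd_head_k; // per-head size of the K cache
    uint32_t n_embd_head_v; // per-head size of the V cache
};

int main() {
    // Hypothetical mismatched head sizes (illustrative values only).
    hparams_t hparams = { 192, 128 };
    bool flash_attn = true;

    // Same check as the diff: if the K and V head sizes differ, warn and
    // force flash attention off instead of failing later in the FA path.
    if (flash_attn && hparams.n_embd_head_k != hparams.n_embd_head_v) {
        std::fprintf(stderr, "%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
        flash_attn = false;
    }

    std::printf("flash_attn = %s\n", flash_attn ? "true" : "false");
    return 0;
}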
