llama : disable FA if KV head sizes do not match (#7982)

Georgi Gerganov 2024-06-17 19:40:01 +03:00 committed by GitHub
parent b473e95084
commit 7c26775adb

@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
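
For reference, below is a minimal standalone sketch of the two checks in this hunk. The struct and function names here (hparams_sketch, cparams_sketch, ggml_type_sketch, validate_ctx_params) are abridged stand-ins invented for illustration, not llama.cpp's real llama_hparams / llama_context_params types; in the actual code this logic lives inside llama_new_context_with_model.

#include <cstdio>
#include <cstdint>

// Hypothetical stand-in for ggml_type; only the F16 case matters here.
enum ggml_type_sketch { TYPE_F16, TYPE_Q4_0 };

struct hparams_sketch {
    uint32_t n_embd_head_k; // per-head dimension of K
    uint32_t n_embd_head_v; // per-head dimension of V
};

struct cparams_sketch {
    bool             flash_attn;
    ggml_type_sketch type_v;    // storage type of the V cache
};

// Returns false when the configuration must be rejected outright.
static bool validate_ctx_params(cparams_sketch & params, const hparams_sketch & hp) {
    // The FA kernels assume K and V rows have the same width, so when the
    // head sizes differ we warn and fall back to the non-FA path.
    if (params.flash_attn && hp.n_embd_head_k != hp.n_embd_head_v) {
        fprintf(stderr, "%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
        params.flash_attn = false;
    }

    // A quantized V cache is only supported on the FA path, so if FA was
    // just forced off (or never enabled) this configuration is fatal.
    if (params.type_v != TYPE_F16 && !params.flash_attn) {
        fprintf(stderr, "%s: V cache quantization requires flash_attn\n", __func__);
        return false;
    }
    return true;
}

int main() {
    // Example dims where K and V head sizes differ (assumed values for
    // illustration), combined with a quantized V cache request.
    hparams_sketch hp = { /*n_embd_head_k=*/192, /*n_embd_head_v=*/128 };
    cparams_sketch cp = { /*flash_attn=*/true, /*type_v=*/TYPE_Q4_0 };

    // flash_attn gets forced off, then the V cache check fails -> reject.
    if (!validate_ctx_params(cp, hp)) {
        fprintf(stderr, "context creation rejected\n");
        return 1;
    }
    return 0;
}

Note the ordering: the head-size check runs before the V-cache check, so a mismatch downgrades flash_attn first and can turn an otherwise-valid quantized-V configuration into a hard error, which is exactly the interaction this commit introduces.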