llama : disable FA if KV head sizes do not match

Georgi Gerganov 2024-06-17 19:20:24 +03:00
parent b473e95084
commit ef79941ac9


@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
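
For context, here is a minimal caller-side sketch (not part of this commit) showing how the new guard surfaces to users. It assumes the public llama.h API of this period: llama_context_default_params, llama_new_context_with_model, and the flash_attn / type_v fields of llama_context_params. If the model's K and V head sizes differ, flash_attn is silently forced off, and context creation then fails because the quantized V cache requires it.

// sketch.cpp - illustrative only, not part of this commit
#include "llama.h"
#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    llama_backend_init();

    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == nullptr) {
        llama_backend_free();
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true;          // request FA; forced off internally when
                                        // n_embd_head_k != n_embd_head_v
    cparams.type_v     = GGML_TYPE_Q4_0; // quantized V cache: only valid if FA stays on

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        // reached e.g. when FA was forced off but type_v is still quantized
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        llama_backend_free();
        return 1;
    }

    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}

In practice this means callers that pair flash_attn with a quantized type_v should be prepared for llama_new_context_with_model to return nullptr on models whose K and V head sizes differ.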