Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2025-01-13 04:00:16 +00:00
Merge branch 'add-gemma2-soft-capping' of github.com:ggerganov/llama.cpp into add-gemma2-soft-capping
Commit 6f2464e3dd
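The hunk below generalizes the flash-attention guard in llama_new_context_with_model: rather than special-casing LLM_ARCH_GEMMA2, it checks the model's attn_soft_cap hyperparameter, so flash attention is forced off for any model that uses attention logit soft-capping.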
@@ -17402,11 +17402,12 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
-    if (params.flash_attn && model->arch == LLM_ARCH_GEMMA2) {
-        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Gemma2 - forcing off\n", __func__);
+    if (params.flash_attn && model->hparams.attn_soft_cap) {
+        LLAMA_LOG_WARN("%s: flash_attn is not compatible with attn_soft_cap - forcing off\n", __func__);
         params.flash_attn = false;
     }
 
+
     if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
         LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
         params.flash_attn = false;
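For context, attention soft-capping (the attn_soft_cap flag checked above, used by Gemma 2) squashes the raw attention scores with a scaled tanh before the softmax; fused flash-attention kernels compute the softmax directly from the raw scores and do not apply this transform, which is why the guard forces flash_attn off. A minimal standalone sketch, with hypothetical names that are not llama.cpp's internal API:

    #include <cmath>
    #include <vector>

    // Illustrative sketch of attention logit soft-capping: each raw score is
    // squashed into (-cap, cap) via cap * tanh(score / cap) before softmax.
    // Names are hypothetical, not llama.cpp internals.
    void soft_cap_attn_scores(std::vector<float> & scores, float cap) {
        for (float & s : scores) {
            s = cap * std::tanh(s / cap);
        }
    }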