llama : default n_swa for phi-3 (#8931)
* default n_swa for phi-3
* fix
* double check swa
commit 7eb23840ed (parent 7c3f55c100)
@@ -4901,7 +4901,6 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_PHI3:
             {
-                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);

                 switch (hparams.n_layer) {
@@ -4910,6 +4909,22 @@ static void llm_load_hparams(
                     case 40: model.type = e_model::MODEL_14B; break;
                     default: model.type = e_model::MODEL_UNKNOWN;
                 }
+
+                // for backward compatibility ; see: https://github.com/ggerganov/llama.cpp/pull/8931
+                if ((hparams.n_layer == 32 || hparams.n_layer == 40) && hparams.n_ctx_train == 4096) {
+                    // default value for Phi-3-mini-4k-instruct and Phi-3-medium-4k-instruct
+                    hparams.n_swa = 2047;
+                } else if (hparams.n_layer == 32 && hparams.n_head_kv(0) == 32 && hparams.n_ctx_train == 131072) {
+                    // default value for Phi-3-mini-128k-instruct
+                    hparams.n_swa = 262144;
+                } else if (hparams.n_layer == 40 && hparams.n_ctx_train == 131072) {
+                    // default value for Phi-3-medium-128k-instruct
+                    hparams.n_swa = 131072;
+                }
+                bool found_swa = ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa, false);
+                if (!found_swa && hparams.n_swa == 0) {
+                    throw std::runtime_error("invalid value for sliding_window");
+                }
             } break;
         case LLM_ARCH_PLAMO:
             {
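In plain terms, the change makes the LLM_KV_ATTENTION_SLIDING_WINDOW key optional for Phi-3: a default n_swa is picked from the model shape first, GGUF metadata overrides it when the key is present, and loading only fails when neither source yields a value. Below is a minimal, self-contained sketch of that resolution order; the names phi3_hparams, phi3_default_swa, and phi3_resolve_swa are illustrative stand-ins, not llama.cpp internals (the real logic lives in llm_load_hparams and uses ml.get_key as shown in the diff).

// Sketch only: illustrative types and helpers, not the llama.cpp API.
#include <cstdint>
#include <stdexcept>

struct phi3_hparams {
    uint32_t n_layer     = 0;   // number of transformer layers
    uint32_t n_head_kv   = 0;   // KV heads (first layer in the real code)
    uint32_t n_ctx_train = 0;   // training context length
    uint32_t n_swa       = 0;   // sliding window attention size
};

// Pick a backward-compatible default for n_swa from the model shape,
// mirroring the branches added in the diff above.
static uint32_t phi3_default_swa(const phi3_hparams & hp) {
    if ((hp.n_layer == 32 || hp.n_layer == 40) && hp.n_ctx_train == 4096) {
        return 2047;    // Phi-3-mini-4k-instruct / Phi-3-medium-4k-instruct
    }
    if (hp.n_layer == 32 && hp.n_head_kv == 32 && hp.n_ctx_train == 131072) {
        return 262144;  // Phi-3-mini-128k-instruct
    }
    if (hp.n_layer == 40 && hp.n_ctx_train == 131072) {
        return 131072;  // Phi-3-medium-128k-instruct
    }
    return 0;           // unknown configuration: no default
}

// Apply the default first, let metadata (if found) override it, and fail
// only when neither source produced a usable value.
static void phi3_resolve_swa(phi3_hparams & hp, bool metadata_found, uint32_t metadata_swa) {
    hp.n_swa = phi3_default_swa(hp);
    if (metadata_found) {
        hp.n_swa = metadata_swa;
    }
    if (!metadata_found && hp.n_swa == 0) {
        throw std::runtime_error("invalid value for sliding_window");
    }
}

With this in place, a Phi-3 GGUF that lacks the sliding_window key resolves n_swa to 2047, 262144, or 131072 depending on n_layer, n_head_kv, and n_ctx_train, instead of requiring the key to be present as the removed ml.get_key call did.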