mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
convert-llama-hf-to-gguf.py : rope scale fix
This commit is contained in:
parent 9070e330ab
commit 7a7d1ba68a
@@ -126,7 +126,7 @@ gguf_writer.add_head_count(head_count)
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
     if "type" in hparams["rope_scaling"]:
         if hparams["rope_scaling"]["type"] == "linear":
             gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
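Why the extra check matters, as a minimal standalone sketch (the hparams dict below is illustrative, not taken from a real config.json): some Hugging Face configs set "rope_scaling" to null, so the key exists in hparams but its value is None, and the old membership test "factor" in hparams["rope_scaling"] raises a TypeError.

# Minimal sketch of the guard added in this commit (illustrative hparams).
hparams = {"rope_scaling": None, "rms_norm_eps": 1e-5}

# Old condition would raise:
# TypeError: argument of type 'NoneType' is not iterable
# if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:

# Fixed condition short-circuits on the None check, so the "factor" lookup
# never runs when rope_scaling is null in config.json. (The diff uses
# "!= None"; "is not None" would be the more idiomatic spelling.)
if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    print("linear rope scale:", hparams["rope_scaling"]["factor"])
else:
    print("no rope scaling factor to write")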