From 7a7d1ba68ab511269f5e686b7d4d2b7c6b9a3e45 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Mon, 21 Aug 2023 14:12:02 +0200
Subject: [PATCH] convert-llama-hf-to-gguf.py : rope scale fix

---
 convert-llama-hf-to-gguf.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/convert-llama-hf-to-gguf.py b/convert-llama-hf-to-gguf.py
index 3a7d4c6c8..d5b3897c7 100644
--- a/convert-llama-hf-to-gguf.py
+++ b/convert-llama-hf-to-gguf.py
@@ -126,7 +126,7 @@ gguf_writer.add_head_count(head_count)
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
-if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
     if "type" in hparams["rope_scaling"]:
         if hparams["rope_scaling"]["type"] == "linear":
             gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
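
Note: the patch guards against Hugging Face config.json files that set "rope_scaling": null, in which case hparams["rope_scaling"] is None and the old check `"factor" in hparams["rope_scaling"]` raises a TypeError. Below is a minimal standalone sketch of the guard; the hparams dict here is hand-built for illustration and is not the converter's parsed config (the patch itself uses `!= None`, though `is not None` is the more idiomatic spelling).

    # Sketch of the None guard added by this patch.
    # hparams stands in for the parsed config.json; the None value
    # mirrors a config that contains '"rope_scaling": null'.
    hparams = {"rms_norm_eps": 1e-5, "rope_scaling": None}

    # Without the 'is not None' check, '"factor" in hparams["rope_scaling"]'
    # would raise: TypeError: argument of type 'NoneType' is not iterable.
    if "rope_scaling" in hparams and hparams["rope_scaling"] is not None and "factor" in hparams["rope_scaling"]:
        if hparams["rope_scaling"].get("type") == "linear":
            print("linear rope scale:", hparams["rope_scaling"]["factor"])
    else:
        print("no rope scaling factor to write")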