From f3de876a1212767c9ba9223de26472b40f9043ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?M=2E=20Yusuf=20Sar=C4=B1g=C3=B6z?=
Date: Mon, 31 Jul 2023 23:58:29 +0300
Subject: [PATCH] fix : update convert-llama-h5-to-gguf.py

---
 convert-llama-h5-to-gguf.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/convert-llama-h5-to-gguf.py b/convert-llama-h5-to-gguf.py
index 0451ffe23..412d334fb 100644
--- a/convert-llama-h5-to-gguf.py
+++ b/convert-llama-h5-to-gguf.py
@@ -46,13 +46,13 @@ if len(sys.argv) > 2:
         print("Invalid ftype: " + str(ftype))
         sys.exit(1)
     fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
+
+with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
+    hparams = json.load(f)
 
 if hparams["architectures"][0] != "LlamaForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0] )
     sys.exit()
-
-with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
-    hparams = json.load(f)
 
 model = AutoModelForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True, trust_remote_code=True)
 list_vars = model.state_dict()
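
Note: the bug fixed here is a use-before-assignment in convert-llama-h5-to-gguf.py.
The architecture check read hparams["architectures"][0] before config.json had been
loaded into hparams, so the script raised a NameError before the check could run.
The patch moves the json.load above the check. A minimal sketch of the corrected
ordering, with the surrounding argument handling elided:

    import json
    import sys

    dir_model = sys.argv[1]

    # Load the hyperparameters first, so hparams is bound
    # before any code reads from it.
    with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
        hparams = json.load(f)

    # Only now is it safe to inspect the architecture field.
    if hparams["architectures"][0] != "LlamaForCausalLM":
        print("Model architecture not supported: " + hparams["architectures"][0])
        sys.exit()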