fix: update convert-llama-h5-to-gguf.py

This commit is contained in:
M. Yusuf Sarıgöz 2023-07-31 23:58:29 +03:00
parent bb42aefaeb
commit f3de876a12

View File

@@ -47,13 +47,13 @@ if len(sys.argv) > 2:
     sys.exit(1)
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
+with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
+    hparams = json.load(f)
 if hparams["architectures"][0] != "LlamaForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0] )
     sys.exit()
-with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
-    hparams = json.load(f)
 model = AutoModelForCausalLM.from_pretrained(dir_model, low_cpu_mem_usage=True, trust_remote_code=True)
 list_vars = model.state_dict()