mirror of https://github.com/ggerganov/llama.cpp.git

convert-llama-h5-to-gguf.py : map tensor names

commit 7d5f4522dd (parent f4d137d98c)
@@ -1,6 +1,8 @@
 # Quick and dirty HF llama --> gguf conversion, GQA/70b wont work
+
 import gguf
+import gguf_tensor_map as tmap
 import os
 import sys
 import struct
 import json
@@ -12,7 +14,6 @@ from sentencepiece import SentencePieceProcessor
 
 
-#NDArray = np.ndarray[Any, Any]
 
 # compatible with python < 3.9
 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
 
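The quoted alias is the whole trick here: both 'TypeAlias' and the aliased type are plain string literals, which type checkers interpret but the interpreter merely stores, so the file imports cleanly on interpreters where np.ndarray is not subscriptable. A minimal sketch of the equivalent unquoted form, assuming Python >= 3.10 and a NumPy recent enough to subscript ndarray at runtime:

    from typing import Any, TypeAlias
    import numpy as np

    # runs only on newer interpreters; the quoted version above works everywhere
    NDArray: TypeAlias = np.ndarray[Any, Any]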
@@ -32,6 +33,7 @@ if len(sys.argv) < 3:
 # output in the same directory as the model
 dir_model = sys.argv[1]
 fname_out = sys.argv[1] + "/ggml-model.bin"
+last_dir  = os.path.basename(os.path.normpath(dir_model))
 
 
 # possible tensor data types
@@ -48,6 +50,8 @@ if len(sys.argv) > 2:
         print("Invalid ftype: " + str(ftype))
         sys.exit(1)
     fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
 
+print("gguf: loading model " + last_dir)
+
 with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
     hparams = json.load(f)
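For orientation, hparams now holds the model's Hugging Face config.json. Representative values for a LLaMA-2 7B checkpoint (assumed here for illustration, not part of the diff), annotated with the GGUF fields they feed in the metadata hunk below:

    hparams = {
        "num_attention_heads":     32,     # -> head_count
        "num_hidden_layers":       32,     # -> block_count / layer count
        "hidden_size":             4096,   # -> embedding length
        "intermediate_size":       11008,  # -> feed-forward length
        "max_position_embeddings": 4096,   # -> context length
        "rms_norm_eps":            1e-05,  # -> layer-norm RMS epsilon
    }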
@@ -62,32 +66,34 @@ list_vars = model.state_dict()
 gguf_writer = gguf.GGUFWriter.open(fname_out)
 
 
-print("gguf: add key-values, metadata")
+print("gguf: get model metadata")
 
-llm_arch = "llama"
+llm_arch    = "llama"
+head_count  = hparams["num_attention_heads"]
+block_count = hparams["num_hidden_layers"]
 
 gguf_writer.add_name("llama2-7b")
 gguf_writer.add_description("gguf test model")
 gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_layer_count(llm_arch, hparams["num_hidden_layers"])
+gguf_writer.add_layer_count(llm_arch, block_count)
 gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
-gguf_writer.add_head_count(llm_arch, hparams["num_attention_heads"])
+gguf_writer.add_head_count(llm_arch, head_count)
 gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
 
-print("gguf: add key-values, tokenizer")
+print("gguf: get tokenizer metadata")
 
 tokens: List[str] = []
 scores: List[float] = []
 
 if Path(dir_model + "/tokenizer.model").is_file():
     # vocab type sentencepiece
-    print("gguf: adding sentencepiece tokenizer vocab")
+    print("gguf: get sentencepiece tokenizer vocab and scores")
 
     tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model")
 
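One derived value worth calling out: the rope dimension count is computed rather than read from the config. hidden_size // num_attention_heads gives the per-head dimension that RoPE rotates; with the representative 7B values above that is 4096 // 32 = 128 rotary dimensions per head.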
@@ -119,7 +125,7 @@ if Path(dir_model + "/tokenizer.json").is_file():
         tokenizer = json.load(f)
 
     if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file():
-        print("gguf: adding special token ids")
+        print("gguf: get special token ids")
 
         with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
             tokenizer_config = json.load(f)
@@ -154,8 +160,10 @@ if Path(dir_model + "/tokenizer.json").is_file():
 
 # TENSORS
 
+tensor_map = tmap.get_tensor_map(block_count)
+
 # tensor info
-print("gguf: add gguf tensor info")
+print("gguf: get tensor metadata")
 
 for name in list_vars.keys():
     data = list_vars[name].squeeze().numpy()
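tmap.get_tensor_map(block_count) comes from gguf_tensor_map.py, which was added in an earlier commit and is not shown in this diff. A hypothetical sketch of what it plausibly returns, inferred entirely from the hard-coded renames this commit deletes in the next hunk (the real map lives in gguf_tensor_map.py and may cover more architectures):

    def get_tensor_map(n_blocks: int) -> dict:
        # HF tensor name (without ".weight"/".bias" suffix) -> ggml name
        tensor_map = {
            "model.embed_tokens": "tok_embeddings",  # token embeddings
            "model.norm":         "norm",            # final normalization
            "lm_head":            "output",          # output projection
        }
        for i in range(n_blocks):
            hf = "model.layers." + str(i)
            gg = "layers." + str(i)
            tensor_map[hf + ".input_layernorm"]          = gg + ".attention_norm"
            tensor_map[hf + ".self_attn.q_proj"]         = gg + ".attention.wq"
            tensor_map[hf + ".self_attn.k_proj"]         = gg + ".attention.wk"
            tensor_map[hf + ".self_attn.v_proj"]         = gg + ".attention.wv"
            tensor_map[hf + ".self_attn.o_proj"]         = gg + ".attention.wo"
            tensor_map[hf + ".post_attention_layernorm"] = gg + ".ffn_norm"
            tensor_map[hf + ".mlp.gate_proj"]            = gg + ".feed_forward.w1"
            tensor_map[hf + ".mlp.down_proj"]            = gg + ".feed_forward.w2"
            tensor_map[hf + ".mlp.up_proj"]              = gg + ".feed_forward.w3"
        return tensor_map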
@@ -166,45 +174,16 @@ for name in list_vars.keys():
 
     # permute these
     if name.endswith(".q_proj.weight") or name.endswith(".k_proj.weight"):
-        data = permute(data, hparams["num_attention_heads"])
+        data = permute(data, head_count)
 
-    # chnage tensor name
-
-    if name == "model.embed_tokens.weight":
-        name = "tok_embeddings.weight"
-    elif name == "model.norm.weight":
-        name = "norm.weight"
-    elif name == "lm_head.weight":
-        name = "output.weight"
+    # map tensor names
+    if name.endswith(".weight") and name[:-7] in tensor_map:
+        name = tensor_map[name[:-7]] + ".weight"
+    elif name.endswith(".bias") and name[:-5] in tensor_map:
+        name = tensor_map[name[:-5]] + ".bias"
     else:
-        for i in range(80):  # maximum number of layers
-            if name == "model.layers." + str(i) + ".input_layernorm.weight":
-                name = "layers." + str(i) + ".attention_norm.weight"
-                break
-            if name == "model.layers." + str(i) + ".self_attn.q_proj.weight":
-                name = "layers." + str(i) + ".attention.wq.weight"
-                break
-            if name == "model.layers." + str(i) + ".self_attn.k_proj.weight":
-                name = "layers." + str(i) + ".attention.wk.weight"
-                break
-            if name == "model.layers." + str(i) + ".self_attn.v_proj.weight":
-                name = "layers." + str(i) + ".attention.wv.weight"
-                break
-            if name == "model.layers." + str(i) + ".self_attn.o_proj.weight":
-                name = "layers." + str(i) + ".attention.wo.weight"
-                break
-            if name == "model.layers." + str(i) + ".post_attention_layernorm.weight":
-                name = "layers." + str(i) + ".ffn_norm.weight"
-                break
-            if name == "model.layers." + str(i) + ".mlp.gate_proj.weight":
-                name = "layers." + str(i) + ".feed_forward.w1.weight"
-                break
-            if name == "model.layers." + str(i) + ".mlp.down_proj.weight":
-                name = "layers." + str(i) + ".feed_forward.w2.weight"
-                break
-            if name == "model.layers." + str(i) + ".mlp.up_proj.weight":
-                name = "layers." + str(i) + ".feed_forward.w3.weight"
-                break
+        print("Can not map tensor '" + name + "'")
+        sys.exit()
 
     n_dims = len(data.shape)
 
||||
@ -227,9 +206,9 @@ for name in list_vars.keys():
|
||||
|
||||
print("gguf: write header")
|
||||
gguf_writer.write_header_to_file()
|
||||
print("gguf: write key-values")
|
||||
print("gguf: write metadata")
|
||||
gguf_writer.write_kv_data_to_file()
|
||||
print("gguf: write tensor info")
|
||||
print("gguf: write tensor metadata")
|
||||
gguf_writer.write_ti_data_to_file()
|
||||
|
||||
# tensor data
|
||||
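The three write_* calls mirror the physical layout of a GGUF file. A simplified sketch of that layout (the exact field encoding is defined by the gguf package, not this script):

    # header      : magic "GGUF", version, tensor count, kv count
    # kv data     : metadata key-value pairs   (write_kv_data_to_file)
    # tensor info : name/shape/dtype/offset    (write_ti_data_to_file)
    # tensor data : aligned raw tensor bytes   (appended in the loop below)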
@@ -237,17 +216,14 @@ print("gguf: write tensor data")
 
 for name in list_vars.keys():
     data = list_vars[name].squeeze().numpy()
-    # print("Process tensor: " + name + " with shape: ", data.shape)
 
     # we don't need these
     if name.endswith(".rotary_emb.inv_freq"):
-        # print(" Skip tensor: " + name)
         continue
 
     # permute these
     if name.endswith(".q_proj.weight") or name.endswith(".k_proj.weight"):
-        # print(" Permute tensor: " + name)
-        data = permute(data, hparams["num_attention_heads"])
+        data = permute(data, head_count)
 
     n_dims = len(data.shape)
 
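permute() itself is defined earlier in the script and is outside this diff. In the HF-to-ggml conversion scripts of this period it is, to the best of my reading, the axis shuffle below, which undoes the rotate-half weight layout Hugging Face uses for RoPE so that ggml's interleaved rotation produces identical results (sketch, not taken from this commit):

    import numpy as np

    def permute(weights: np.ndarray, n_head: int) -> np.ndarray:
        # regroup the rows per head, swap the (2, rows//2) axes, flatten back
        return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                       .swapaxes(1, 2)
                       .reshape(weights.shape))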
@@ -255,16 +231,13 @@ for name in list_vars.keys():
     ftype_cur = 0
     if ftype != 0:
         if name.endswith(".weight") and n_dims == 2:
-            # print(" Converting to float16")
             data = data.astype(np.float16)
             ftype_cur = 1
         else:
-            # print(" Converting to float32")
             data = data.astype(np.float32)
             ftype_cur = 0
     else:
         if data.dtype != np.float32:
-            # print(" Converting to float32")
             data = data.astype(np.float32)
             ftype_cur = 0
 
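Restated as a standalone rule: with ftype 1 only two-dimensional *.weight matrices are stored as float16, while one-dimensional tensors such as the norms always stay float32. A hypothetical helper equivalent to the inline logic above:

    import numpy as np

    def target_dtype(name: str, n_dims: int, ftype: int) -> type:
        # ftype 0 = keep everything f32; ftype 1 = f16 for 2-D weight matrices
        if ftype != 0 and name.endswith(".weight") and n_dims == 2:
            return np.float16
        return np.float32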
@@ -273,5 +246,5 @@ for name in list_vars.keys():
 gguf_writer.close()
 
 
-print("gguf: conversion done, output file: " + fname_out)
+print("gguf: model successfully exported to '" + fname_out + "'")
 print("")