gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
parent 5484737d58
commit fc3a523211
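Before the per-file hunks, here is a minimal sketch of the single-pass export flow this change introduces. It is a composite assembled from calls that appear in the diff below, not an excerpt from the patch; the model name, hyperparameter values and the tensor dictionary are placeholders.

# Minimal sketch of the single-pass GGUF export flow after this change.
# Assembled from calls shown in the diff; values and tensors are placeholders.
import numpy as np
import gguf

fname_out = "ggml-model-f16.gguf"
gguf_writer = gguf.GGUFWriter(fname_out, arch="gptneox")  # arch is now set at construction

# model metadata: one add_* call per key; the per-call arch argument is gone
gguf_writer.add_architecture()
gguf_writer.add_name("my-model")
gguf_writer.add_context_length(2048)
gguf_writer.add_embedding_length(4096)
gguf_writer.add_block_count(32)
gguf_writer.add_head_count(32)

# tensors: a single add_tensor() call per tensor replaces the old
# add_tensor_info() + write_tensor_to_file() pair
tensors = {"token_embd.weight": np.zeros((32000, 4096), dtype=np.float16)}
for name, data in tensors.items():
    gguf_writer.add_tensor(name, data)

# one pass over the output file
gguf_writer.write_header_to_file()
gguf_writer.write_kv_data_to_file()
gguf_writer.write_tensors_to_file()
gguf_writer.close()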
@@ -13,6 +13,8 @@ from pathlib import Path
 from transformers import AutoTokenizer
 
 # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
+
+
 def bytes_to_unicode():
     """
     Returns list of utf-8 byte and a corresponding list of unicode strings.
@@ -34,6 +36,7 @@ def bytes_to_unicode():
     cs = [chr(n) for n in cs]
     return dict(zip(bs, cs))
 
+
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
     for filename in os.listdir(dir_model):
@@ -44,6 +47,7 @@ def count_model_parts(dir_model: str) -> int:
         print("gguf: found " + str(num_parts) + " model parts")
     return num_parts
 
+
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print(" ftype == 0 -> float32")
@@ -58,7 +62,7 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 # ftype == 0 -> float32
 # ftype == 1 -> float16
-#
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -67,6 +71,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
         sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -77,30 +82,30 @@ with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
     hparams = json.load(f)
 
 if hparams["architectures"][0] != "GPTNeoXForCausalLM":
-    print("Model architecture not supported: " + hparams["architectures"][0] )
+    print("Model architecture not supported: " + hparams["architectures"][0])
 
     sys.exit()
 
 # get number of model parts
 num_parts = count_model_parts(dir_model)
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+llm_arch = "gptneox"
+gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
 
 print("gguf: get model metadata")
 
-llm_arch = "gptneox"
 block_count = hparams["num_hidden_layers"]
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type( "All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
-gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, int( hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"])) )
-gguf_writer.add_head_count(llm_arch, hparams["num_attention_heads"])
-gguf_writer.add_parallel_residual(llm_arch, hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
-gguf_writer.add_layer_norm_eps(llm_arch, hparams["layer_norm_eps"])
+gguf_writer.add_context_length(hparams["max_position_embeddings"])
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"])))
+gguf_writer.add_head_count(hparams["num_attention_heads"])
+gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
+gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"])
 
 
 # TOKENIZATION
 
@@ -124,14 +129,14 @@ if Path(dir_model + "/tokenizer.json").is_file():
 
     print("gguf: get gpt2 tokenizer vocab")
 
-    vocab_size = len( tokenizer_json["model"]["vocab"] )
+    vocab_size = len(tokenizer_json["model"]["vocab"])
 
     # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
     tokenizer = AutoTokenizer.from_pretrained(dir_model)
 
     reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
     byte_encoder = bytes_to_unicode()
-    byte_decoder = {v:k for k, v in byte_encoder.items()}
+    byte_decoder = {v: k for k, v in byte_encoder.items()}
 
     for i in range(vocab_size):
         if i in reverse_vocab:
@@ -146,8 +151,9 @@ if Path(dir_model + "/tokenizer.json").is_file():
                     text.extend(c.encode('utf-8'))
         else:
            print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
-           padding_token = f"[PAD{i}]".encode("utf8")
-           text = bytearray(padding_token)
+           pad_token = f"[PAD{i}]".encode("utf8")
+           text = bytearray(pad_token)
+
        tokens.append(text)
 
    gguf_writer.add_token_list(tokens)
@@ -201,7 +207,7 @@ else:
     )
 
 for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
+    print("gguf: loading model part '" + part_name + "'")
     model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
 
     for name in model_part.keys():
@@ -223,11 +229,12 @@ for part_name in part_names:
         elif name.endswith(".bias") and name[:-5] in tensor_map:
             name = tensor_map[name[:-5]] + ".bias"
         else:
-            print( "Can not map tensor '" + name + "'" )
+            print("Can not map tensor '" + name + "'")
             sys.exit()
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -241,77 +248,21 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
-
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
+
+        data = data.astype(data_dtype)
+
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
-
-# tensor data
-print("gguf: convert and write tensor data")
-
-if num_parts == 0:
-    part_names = ("pytorch_model.bin",)
-else:
-    part_names = (
-        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
-    )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"):
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print( name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_to_file(data)
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
 gguf_writer.close()
 
-print("gguf: model successfully exported to '" + fname_out + "'" )
+print("gguf: model successfully exported to '" + fname_out + "'")
 print("")
@@ -18,6 +18,7 @@ from sentencepiece import SentencePieceProcessor
 # compatible with python < 3.9
 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
 
+
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
     for filename in os.listdir(dir_model):
@@ -28,10 +29,12 @@ def count_model_parts(dir_model: str) -> int:
         print("gguf: found " + str(num_parts) + " model parts")
     return num_parts
 
+
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print(" ftype == 0 -> float32")
     print(" ftype == 1 -> float16")
+
     sys.exit(1)
 
 
@@ -43,7 +46,7 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 # ftype == 0 -> float32
 # ftype == 1 -> float16
-#
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -52,6 +55,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
         sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -70,14 +74,14 @@ num_parts = count_model_parts(dir_model)
 
 if num_parts > 1:
     print("gguf: Only models with a single datafile are supported.")
+
     sys.exit()
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+llm_arch = "llama"
+gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
 
 
 print("gguf: get model metadata")
 
-llm_arch = "llama"
 block_count = hparams["num_hidden_layers"]
 head_count = hparams["num_attention_heads"]
 
@@ -89,21 +93,20 @@ else:
 if "_name_or_path" in hparams:
     hf_repo = hparams["_name_or_path"]
 else:
-    hf_repo=""
+    hf_repo = ""
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type( "All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
-gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
-gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
-gguf_writer.add_head_count(llm_arch, head_count)
-gguf_writer.add_head_count_kv(llm_arch, head_count_kv)
-gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
+gguf_writer.add_tensor_data_layout("Meta AI original pth")
+gguf_writer.add_context_length(hparams["max_position_embeddings"])
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
@@ -125,19 +128,23 @@ if Path(dir_model + "/tokenizer.model").is_file():
     score: float
 
     piece = tokenizer.id_to_piece(i)
     text = piece.encode("utf-8")
     score = tokenizer.get_score(i)
 
     toktype = 1 # defualt to normal token type
-    if tokenizer.is_unknown(i): toktype = 2
-    if tokenizer.is_control(i): toktype = 3
+    if tokenizer.is_unknown(i):
+        toktype = 2
+    if tokenizer.is_control(i):
+        toktype = 3
 
     # TODO: How to determinate if a token is user defined?
     # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
     # if tokenizer.is_user_defined(i): toktype = 4
 
-    if tokenizer.is_unused(i): toktype = 5
-    if tokenizer.is_byte(i): toktype = 6
+    if tokenizer.is_unused(i):
+        toktype = 5
+    if tokenizer.is_byte(i):
+        toktype = 6
 
     tokens.append(text)
     scores.append(score)
@@ -193,10 +200,10 @@ tensor_map = gguf.get_tensor_name_map(block_count)
 # tensor info
 print("gguf: get tensor metadata")
 
-part_names = ( f"consolidated.{n:02}.pth" for n in range(0, num_parts) )
+part_names = (f"consolidated.{n:02}.pth" for n in range(0, num_parts))
 
 for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
+    print("gguf: loading model part '" + part_name + "'")
     model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
 
     for name in model_part.keys():
@@ -218,11 +225,12 @@ for part_name in part_names:
         elif name.endswith(".bias") and name[:-5] in tensor_map:
             name = tensor_map[name[:-5]] + ".bias"
         else:
-            print( "Can not map tensor '" + name + "'" )
+            print("Can not map tensor '" + name + "'")
             sys.exit()
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -236,69 +244,19 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
-
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
+
+        data = data.astype(data_dtype)
+
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
-
-# tensor data
-print("gguf: convert and write tensor data")
-
-part_names = ( f"consolidated.{n:02}.pth" for n in range(0, num_parts) )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name == "rope.freqs":
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print( name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_data(data)
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
 gguf_writer.close()
 
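The SentencePiece branches above assign each vocabulary piece a numeric token type before handing it to the writer. As a reading aid, here is the mapping implied by the comments in the diff; the enum names are illustrative, the scripts pass the bare integers.

from enum import IntEnum

class TokenType(IntEnum):  # illustrative names; the scripts use the raw integers
    NORMAL = 1        # default
    UNKNOWN = 2       # tokenizer.is_unknown(i)
    CONTROL = 3       # tokenizer.is_control(i)
    USER_DEFINED = 4  # noted as a TODO in the scripts
    UNUSED = 5        # tokenizer.is_unused(i)
    BYTE = 6          # tokenizer.is_byte(i)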
@@ -18,26 +18,35 @@ NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
 
 # reverse HF permute back to original pth layout
 # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py
 
+
 def reverse_hf_permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
-    if n_kv_head is not None and n_head != n_kv_head: n_head //= n_kv_head
+    if n_kv_head is not None and n_head != n_kv_head:
+        n_head //= n_kv_head
+
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
             .swapaxes(1, 2)
             .reshape(weights.shape))
 
+
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
+
     for filename in os.listdir(dir_model):
         if filename.startswith("pytorch_model-"):
             num_parts += 1
+
     if num_parts > 0:
         print("gguf: found " + str(num_parts) + " model parts")
+
     return num_parts
 
+
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print(" ftype == 0 -> float32")
     print(" ftype == 1 -> float16")
+
     sys.exit(1)
 
 
@@ -49,7 +58,8 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 # ftype == 0 -> float32
 # ftype == 1 -> float16
-#
+
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -58,6 +68,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
        sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -69,17 +80,17 @@ with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
 
 if hparams["architectures"][0] != "LlamaForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0])
 
     sys.exit()
 
 # get number of model parts
 num_parts = count_model_parts(dir_model)
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+gguf_writer = gguf.GGUFWriter(fname_out, arch="llama")
 
 
 print("gguf: get model metadata")
 
-llm_arch = "llama"
 block_count = hparams["num_hidden_layers"]
 head_count = hparams["num_attention_heads"]
 
@@ -91,7 +102,7 @@ else:
 if "_name_or_path" in hparams:
     hf_repo = hparams["_name_or_path"]
 else:
-    hf_repo=""
+    hf_repo = ""
 
 if "max_sequence_length" in hparams:
     ctx_length = hparams["max_sequence_length"]
@@ -99,22 +110,22 @@ elif "max_position_embeddings" in hparams:
     ctx_length = hparams["max_position_embeddings"]
 else:
     print("gguf: can not find ctx length parameter.")
 
     sys.exit()
 
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type("All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
-gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
-gguf_writer.add_context_length(llm_arch, ctx_length)
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
-gguf_writer.add_head_count(llm_arch, head_count)
-gguf_writer.add_head_count_kv(llm_arch, head_count_kv)
-gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
+gguf_writer.add_tensor_data_layout("Meta AI original pth")
+gguf_writer.add_context_length(ctx_length)
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
@@ -136,19 +147,23 @@ if Path(dir_model + "/tokenizer.model").is_file():
     score: float
 
     piece = tokenizer.id_to_piece(i)
     text = piece.encode("utf-8")
     score = tokenizer.get_score(i)
 
     toktype = 1 # defualt to normal token type
-    if tokenizer.is_unknown(i): toktype = 2
-    if tokenizer.is_control(i): toktype = 3
+    if tokenizer.is_unknown(i):
+        toktype = 2
+    if tokenizer.is_control(i):
+        toktype = 3
 
     # TODO: How to determinate if a token is user defined?
     # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
     # if tokenizer.is_user_defined(i): toktype = 4
 
-    if tokenizer.is_unused(i): toktype = 5
-    if tokenizer.is_byte(i): toktype = 6
+    if tokenizer.is_unused(i):
+        toktype = 5
+    if tokenizer.is_byte(i):
+        toktype = 6
 
     tokens.append(text)
     scores.append(score)
@@ -212,7 +227,7 @@ else:
     )
 
 for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
+    print("gguf: loading model part '" + part_name + "'")
     model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
 
     for name in model_part.keys():
@@ -238,11 +253,13 @@ for part_name in part_names:
         elif name.endswith(".bias") and name[:-5] in tensor_map:
             name = tensor_map[name[:-5]] + ".bias"
         else:
-            print( "Can not map tensor '" + name + "'" )
+            print("Can not map tensor '" + name + "'")
+
             sys.exit()
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -256,78 +273,19 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
-
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        data = data.astype(data_dtype)
+
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
+
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
-
-# tensor data
-print("gguf: convert and write tensor data")
-
-if num_parts == 0:
-    part_names = ("pytorch_model.bin",)
-else:
-    part_names = (
-        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
-    )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name.endswith(".rotary_emb.inv_freq"):
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # reverse permute these
-        if name.endswith(".q_proj.weight") or name.endswith(".k_proj.weight"):
-            data = reverse_hf_permute(data, head_count, head_count_kv)
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print(name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_to_file(data)
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
 gguf_writer.close()
 
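The remaining hunks are in gguf.py itself. Both conversion scripts above rename tensors through the name map that gguf.py provides; as a reading aid, this is the lookup pattern they use, shown here with a hand-written stand-in for the real map (the two entries are illustrative, not the full table):

# Stand-in for the map returned by gguf.get_tensor_name_map(...); two example entries.
tensor_map = {
    "gpt_neox.layers.0.mlp.dense_h_to_4h": "blk.0.ffn_up",
    "gpt_neox.embed_in": "token_embd",
}

name = "gpt_neox.layers.0.mlp.dense_h_to_4h.weight"
# The scripts strip the ".weight"/".bias" suffix, look up the base name,
# and re-append the suffix; unmapped tensors abort the conversion.
if name.endswith(".weight") and name[:-7] in tensor_map:
    name = tensor_map[name[:-7]] + ".weight"
elif name.endswith(".bias") and name[:-5] in tensor_map:
    name = tensor_map[name[:-5]] + ".bias"

print(name)  # blk.0.ffn_up.weight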
gguf.py (329 changed lines)

@@ -1,11 +1,7 @@
-"""TODOs
-1. Implement writers for known architectures, LLaMA in particular.
-2. Add docstrings from the format specs.
-3. After development is done, Convert it to a proper pip-installable Python package, and possibly move it to its own repo under ggml-org.
-"""
-
+import shutil
 import sys
 import struct
+import tempfile
 import numpy as np
 
 from enum import IntEnum, auto
@@ -27,30 +23,29 @@ KEY_GENERAL_NAME = "general.name"
 KEY_GENERAL_AUTHOR = "general.author"
 KEY_GENERAL_URL = "general.url"
 KEY_GENERAL_DESCRIPTION = "general.description"
-KEY_GENERAL_FILE_TYPE = "general.file_type"
 KEY_GENERAL_LICENSE = "general.license"
 KEY_GENERAL_SOURCE_URL = "general.source.url"
 KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
 
 # LLM
 KEY_LLM_CONTEXT_LENGTH = "{arch}.context_length"
 KEY_LLM_EMBEDDING_LENGTH = "{arch}.embedding_length"
 KEY_LLM_BLOCK_COUNT = "{arch}.block_count"
 KEY_LLM_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
 KEY_LLM_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
 KEY_LLM_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
 
 # attention
 KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count"
 KEY_ATTENTION_HEAD_COUNT_KV = "{arch}.attention.head_count_kv"
 KEY_ATTENTION_MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias"
 KEY_ATTENTION_CLAMP_KQV = "{arch}.attention.clamp_kqv"
 KEY_ATTENTION_LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
 KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
 
 # RoPE
 KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count"
 KEY_ROPE_SCALE = "{arch}.rope.scale"
 
 # tokenization
 KEY_TOKENIZER_MODEL = "tokenizer.ggml.model"
@@ -70,6 +65,7 @@ KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
 # recommended mapping of model tensor names for storage in gguf
 #
 
+
 class MODEL_ARCH(IntEnum):
     LLAMA = auto()
     FALCON = auto()
@@ -78,81 +74,84 @@ class MODEL_ARCH(IntEnum):
     GPTNEOX = auto()
     MPT = auto()
 
+
 class MODEL_TENSOR(IntEnum):
     TOKEN_EMBD = auto()
     POS_EMBD = auto()
     OUTPUT = auto()
     OUTPUT_NORM = auto()
     ROPE_FREQS = auto()
     ATTN_Q = auto()
     ATTN_K = auto()
     ATTN_V = auto()
     ATTN_QKV = auto()
     ATTN_OUT = auto()
     ATTN_NORM = auto()
     ATTN_NORM_2 = auto()
     ATTN_ROT_EMBD = auto()
     FFN_GATE = auto()
     FFN_DOWN = auto()
     FFN_UP = auto()
     FFN_NORM = auto()
 
+
 MODEL_ARCH_NAMES = {
-    MODEL_ARCH.LLAMA : "llama",
-    MODEL_ARCH.FALCON : "falcon",
-    MODEL_ARCH.GPT2 : "gpt2",
-    MODEL_ARCH.GPTJ : "gptj",
-    MODEL_ARCH.GPTNEOX : "gptneox",
-    MODEL_ARCH.MPT : "mpt",
+    MODEL_ARCH.LLAMA: "llama",
+    MODEL_ARCH.FALCON: "falcon",
+    MODEL_ARCH.GPT2: "gpt2",
+    MODEL_ARCH.GPTJ: "gptj",
+    MODEL_ARCH.GPTNEOX: "gptneox",
+    MODEL_ARCH.MPT: "mpt",
 }
 
 MODEL_TENSOR_NAMES = {
-    MODEL_ARCH.LLAMA : {
-        MODEL_TENSOR.TOKEN_EMBD : "token_embd",
-        MODEL_TENSOR.OUTPUT_NORM : "output_norm",
-        MODEL_TENSOR.OUTPUT : "output",
-        MODEL_TENSOR.ROPE_FREQS : "rope_freqs",
-        MODEL_TENSOR.ATTN_NORM : "blk.{bid}.attn_norm",
-        MODEL_TENSOR.ATTN_Q : "blk.{bid}.attn_q",
-        MODEL_TENSOR.ATTN_K : "blk.{bid}.attn_k",
-        MODEL_TENSOR.ATTN_V : "blk.{bid}.attn_v",
-        MODEL_TENSOR.ATTN_OUT : "blk.{bid}.attn_output",
-        MODEL_TENSOR.ATTN_ROT_EMBD : "blk.{bid}.attn_rot_embd",
-        MODEL_TENSOR.FFN_NORM : "blk.{bid}.ffn_norm",
-        MODEL_TENSOR.FFN_GATE : "blk.{bid}.ffn_gate",
-        MODEL_TENSOR.FFN_DOWN : "blk.{bid}.ffn_down",
-        MODEL_TENSOR.FFN_UP : "blk.{bid}.ffn_up",
+    MODEL_ARCH.LLAMA: {
+        MODEL_TENSOR.TOKEN_EMBD: "token_embd",
+        MODEL_TENSOR.OUTPUT_NORM: "output_norm",
+        MODEL_TENSOR.OUTPUT: "output",
+        MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
+        MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
+        MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
+        MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
+        MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
+        MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
+        MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
+        MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
+        MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
+        MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
+        MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
     },
-    MODEL_ARCH.FALCON : {
-        MODEL_TENSOR.TOKEN_EMBD : "token_embd",
-        MODEL_TENSOR.OUTPUT_NORM : "output_norm",
-        MODEL_TENSOR.OUTPUT : "output",
-        MODEL_TENSOR.ATTN_NORM : "blk.{bid}.attn_norm",
-        MODEL_TENSOR.ATTN_NORM_2 : "blk.{bid}.attn_norm_2",
-        MODEL_TENSOR.ATTN_QKV : "blk.{bid}.attn_qkv",
-        MODEL_TENSOR.ATTN_OUT : "blk.{bid}.attn_output",
-        MODEL_TENSOR.FFN_DOWN : "blk.{bid}.ffn_down",
-        MODEL_TENSOR.FFN_UP : "blk.{bid}.ffn_up",
+    MODEL_ARCH.FALCON: {
+        MODEL_TENSOR.TOKEN_EMBD: "token_embd",
+        MODEL_TENSOR.OUTPUT_NORM: "output_norm",
+        MODEL_TENSOR.OUTPUT: "output",
+        MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
+        MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
+        MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
+        MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
+        MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
+        MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
     },
-    MODEL_ARCH.GPT2 : {
+    MODEL_ARCH.GPT2: {
         # TODO
     },
     # TODO
 }
 
 # tensors that will not be serialized
 MODEL_TENSOR_SKIP = {
-    MODEL_ARCH.LLAMA : [
+    MODEL_ARCH.LLAMA: [
         MODEL_TENSOR.ROPE_FREQS,
         MODEL_TENSOR.ATTN_ROT_EMBD,
     ],
 }
 
+
 # TODO: the following helper functions should be removed
 # instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR)
 # however, my Python is very bad, and I couldn't figure out how to do this, hence these functions
 # REMOVE
-def should_skip_tensor_TMP(arch : MODEL_ARCH, n_blocks : int, name : str) -> bool:
+def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
     for skip in MODEL_TENSOR_SKIP.get(arch, []):
         for i in range(n_blocks):
             if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i):
@@ -160,151 +159,152 @@ def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
 
     return False
 
-def get_tensor_name_map(arch : MODEL_ARCH, n_blocks : int) -> dict:
+
+def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
     tensor_map = {}
 
     # Token embeddings
     mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.TOKEN_EMBD, None)
 
     tensor_map["gpt_neox.embed_in"] = mapped_to # gptneox
     tensor_map["transformer.wte"] = mapped_to # gpt2 mpt
     tensor_map["transformer.word_embeddings"] = mapped_to # falcon
     tensor_map["model.embed_tokens"] = mapped_to # llama-hf
     tensor_map["tok_embeddings"] = mapped_to # llama-pth
 
     # Position embeddings
     mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.POS_EMBD, None)
 
     tensor_map["transformer.wpe"] = mapped_to # gpt2
 
     # Output
     mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT, None)
 
     tensor_map["embed_out"] = mapped_to # gptneox
     tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf
     tensor_map["output"] = mapped_to # llama-pth
 
     # Output norm
     mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT_NORM, None)
 
     tensor_map["gpt_neox.final_layer_norm"] = mapped_to # gptneox
     tensor_map["transformer.ln_f"] = mapped_to # gpt2 falcon
     tensor_map["transformer.norm_f"] = mapped_to # mpt
     tensor_map["model.norm"] = mapped_to # llama-hf
     tensor_map["norm"] = mapped_to # llama-pth
 
     # Rope frequencies
     mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ROPE_FREQS, None)
 
     tensor_map["rope.freqs"] = mapped_to # llama-pth
 
     # Attention and feed-forward blocks
-    for i in range(0,n_blocks):
+    for i in range(0, n_blocks):
         # Attention norm
         # TODO: is there are simpler way to write these 2 lines in Python?
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to else None
 
         tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox
         tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2
         tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt
         tensor_map["transformer.h."+str(i)+".input_layernorm"] = mapped_to # falcon7b
         tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b
         tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth
 
         # Attention norm 2
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM_2, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b
 
         # Attention query-key-value
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_QKV, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox
         tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2
         tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt
         tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon
 
         # Attention query
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_Q, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth
 
         # Attention key
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_K, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth
 
         # Attention value
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_V, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth
 
         # Attention output
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_OUT, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox
         tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2
         tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt
         tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon
         tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth
 
         # Rotary embeddings
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_ROT_EMBD, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["model.layers."+str(i)+".self_attn.rotary_emb.inv_freq"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".attention.inner_attention.rope.freqs"] = mapped_to # llama-pth
 
         # Feed-forward norm
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_NORM, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox
         tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2
         tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt
         tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth
 
         # Feed-forward up
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox
         tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2
         tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt
         tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon
         tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth
 
         # Feed-forward gate
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_GATE, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
 
         tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf
         tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth
 
         # Feed-forward down
         mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_DOWN, None)
         mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox
|
tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox
|
||||||
tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2
|
tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2
|
||||||
tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt
|
tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt
|
||||||
tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon
|
tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon
|
||||||
tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf
|
tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf
|
||||||
tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth
|
tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth
|
||||||
|
|
||||||
return tensor_map
|
return tensor_map
|
||||||
|
|
||||||
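A minimal sketch, not part of this commit, of how a conversion script might use the resulting map: the source tensor name is stripped of its ".weight"/".bias" suffix, looked up, and replaced by the GGUF-side name. It assumes gguf.py is importable as gguf and exposes get_tensor_name_map and MODEL_ARCH; the suffix handling and the LLAMA example are illustrative.

import gguf

def to_gguf_name(src_name: str, n_blocks: int) -> str:
    # Build the per-architecture name map once per model in real code.
    tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, n_blocks)
    # Split off a trailing ".weight" / ".bias" before the lookup.
    for suffix in (".weight", ".bias"):
        if src_name.endswith(suffix):
            mapped = tensor_map.get(src_name[:-len(suffix)])
            if mapped is None:
                raise KeyError("cannot map tensor: " + src_name)
            return mapped + suffix
    return tensor_map.get(src_name, src_name)

# e.g. "model.layers.0.self_attn.q_proj.weight" maps to something like
# "blk.0.attn_q.weight" (the exact target depends on MODEL_TENSOR_NAMES).
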
@@ -312,6 +312,7 @@ def get_tensor_name_map(arch : MODEL_ARCH, n_blocks : int) -> dict:
# implementation
#


class GGMLQuantizationType(IntEnum):
    F32 = 0
    F16 = 1
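For orientation, these enum values are the per-tensor type codes written into the tensor-info section; a small sketch of the dtype mapping one would expect (the real lookup happens inside add_tensor_info, and the mapping shown here is an assumption for float types only):

import numpy as np
from enum import IntEnum

class GGMLQuantizationType(IntEnum):  # mirrors the enum above
    F32 = 0
    F16 = 1

# assumed numpy dtype -> type-code mapping for the two float cases
dtype_to_ggml = {
    np.dtype(np.float32): GGMLQuantizationType.F32,
    np.dtype(np.float16): GGMLQuantizationType.F16,
}
assert int(dtype_to_ggml[np.dtype(np.float32)]) == 0  # value stored in the file
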
@@ -481,6 +482,19 @@ class GGUFWriter:
        self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
        self.ti_data_count += 1

+    def add_tensor(self, name: str, tensor: np.ndarray):
+        if not hasattr(self, "temp_file"):
+            self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024)
+            self.temp_file.seek(0)
+
+        self.add_tensor_info(name, tensor.shape, tensor.dtype, tensor.nbytes)
+
+        tensor.tofile(self.temp_file)
+
+        pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes
+        if pad != 0:
+            self.temp_file.write(bytes([0] * pad))
+
    def write_tensor_data(self, tensor: np.ndarray):
        pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
        if pad != 0:
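add_tensor pads each buffered tensor out to the data alignment, so the offsets accumulated by add_tensor_info remain valid when the buffered blob is later copied verbatim. A short sketch of the padding arithmetic, assuming ggml_pad rounds its argument up to the next multiple of the alignment and using the GGUF default alignment of 32 as an example:

def ggml_pad(x: int, n: int) -> int:
    # assumed behaviour: round x up to the next multiple of n
    return ((x + n - 1) // n) * n

alignment = 32                 # assumed default data alignment for the example
nbytes = 32 * 4                # a (32,) float32 tensor
print(ggml_pad(nbytes, alignment) - nbytes)  # 0  -> already aligned, nothing appended
print(ggml_pad(100, alignment) - 100)        # 28 -> 28 zero bytes written after the tensor
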
@@ -492,6 +506,19 @@ class GGUFWriter:
        if pad != 0:
            self.fout.write(bytes([0] * pad))

+    def write_tensors_to_file(self):
+        self.write_ti_data_to_file()
+
+        pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
+        if pad != 0:
+            self.fout.write(bytes([0] * pad))
+
+        self.temp_file.seek(0)
+
+        shutil.copyfileobj(self.temp_file, self.fout)
+        self.flush()
+        self.temp_file.close()
+
    def flush(self):
        self.fout.flush()
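Taken together, the new path buffers each tensor (plus alignment padding) into a SpooledTemporaryFile while only the metadata goes into the in-memory tensor-info block; write_tensors_to_file then emits that metadata and streams the buffered bytes straight into the output in one pass. A rough standalone sketch of the same pattern, with illustrative names rather than the GGUFWriter API and a toy metadata format instead of the GGUF layout:

import shutil
import tempfile

class TinyWriter:
    """Toy single-pass writer: metadata in memory, tensor data spooled to a temp file."""

    def __init__(self, path: str, alignment: int = 32):
        self.fout = open(path, "wb")
        self.alignment = alignment
        self.infos = []  # (name, nbytes) records
        self.temp = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256 * 1024 * 1024)

    @staticmethod
    def pad(x: int, n: int) -> int:
        return ((x + n - 1) // n) * n

    def add_tensor(self, name: str, data: bytes):
        self.infos.append((name, len(data)))
        self.temp.write(data)
        self.temp.write(bytes(self.pad(len(data), self.alignment) - len(data)))

    def write(self):
        for name, nbytes in self.infos:  # stand-in for the tensor-info section
            self.fout.write(f"{name}:{nbytes}\n".encode())
        self.temp.seek(0)
        shutil.copyfileobj(self.temp, self.fout)  # single streaming copy of all tensor data
        self.fout.flush()
        self.temp.close()
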
@@ -513,9 +540,6 @@ class GGUFWriter:
    def add_description(self, description: str):
        self.add_string(KEY_GENERAL_DESCRIPTION, description)

-    def add_file_type(self, file_type: str):
-        self.add_string(KEY_GENERAL_FILE_TYPE, file_type)
-
    def add_source_url(self, url: str):
        self.add_string(KEY_GENERAL_SOURCE_URL, url)
@@ -618,23 +642,28 @@ class GGUFWriter:
    def add_pad_token_id(self, id: int):
        self.add_uint32(KEY_TOKENIZER_PAD_ID, id)


# Example usage:
if __name__ == "__main__":
    # Example usage with a file
    gguf_writer = GGUFWriter("example.gguf", "llama")

+    gguf_writer.add_architecture()
+    gguf_writer.add_block_count(12)
    gguf_writer.add_uint32("answer", 42) # Write a 32-bit integer
    gguf_writer.add_float32("answer_in_float", 42.0) # Write a 32-bit float
    gguf_writer.add_custom_alignment(64)

    tensor1 = np.ones((32,), dtype=np.float32) * 100.0
-    tensor2 = np.ones((32,), dtype=np.float32) * 101.0
-    gguf_writer.add_tensor_info("tensor0", tensor1)
-    gguf_writer.add_tensor_info("tensor1", tensor2)
+    tensor2 = np.ones((64,), dtype=np.float32) * 101.0
+    tensor3 = np.ones((96,), dtype=np.float32) * 102.0
+
+    gguf_writer.add_tensor("tensor1", tensor1)
+    gguf_writer.add_tensor("tensor2", tensor2)
+    gguf_writer.add_tensor("tensor3", tensor3)

    gguf_writer.write_header_to_file()
    gguf_writer.write_kv_data_to_file()
-    gguf_writer.write_ti_data_to_file()
-    gguf_writer.write_tensor_data(tensor1)
-    gguf_writer.write_tensor_data(tensor2)
+    gguf_writer.write_tensors_to_file()

    gguf_writer.close()
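As a quick sanity check of the example above, the first four bytes of the written file should be the GGUF magic; a minimal sketch, assuming the "example.gguf" file name used in the example:

with open("example.gguf", "rb") as f:
    assert f.read(4) == b"GGUF", "not a GGUF file"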