mirror of https://github.com/ggerganov/llama.cpp.git
gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in: parent 5484737d58, commit fc3a523211
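In short: each conversion script used to make two passes over the model — one to register every tensor with add_tensor_info() and write the tensor metadata, then a second load of every model part to stream the data out with write_tensor_data(). Tensors are now handed to the writer exactly once. A minimal sketch of the new single-pass flow, distilled from the diffs below (file name, tensor name and shape are illustrative):

    import numpy as np
    import gguf

    # metadata and tensors are declared up front; add_tensor() buffers the data
    gguf_writer = gguf.GGUFWriter("example.gguf", arch="llama")
    gguf_writer.add_architecture()
    gguf_writer.add_block_count(12)

    data = np.ones((32,), dtype=np.float32)  # illustrative tensor
    gguf_writer.add_tensor("tensor1", data)

    gguf_writer.write_header_to_file()
    gguf_writer.write_kv_data_to_file()
    gguf_writer.write_tensors_to_file()  # replaces write_ti_data_to_file() + per-tensor write_tensor_data()
    gguf_writer.close()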
convert-gptneox-hf-to-gguf.py
@@ -13,6 +13,8 @@ from pathlib import Path
 from transformers import AutoTokenizer
 
 # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
+
+
 def bytes_to_unicode():
     """
     Returns list of utf-8 byte and a corresponding list of unicode strings.
@@ -34,6 +36,7 @@ def bytes_to_unicode():
     cs = [chr(n) for n in cs]
     return dict(zip(bs, cs))
 
+
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
     for filename in os.listdir(dir_model):
@@ -44,6 +47,7 @@ def count_model_parts(dir_model: str) -> int:
     print("gguf: found " + str(num_parts) + " model parts")
     return num_parts
 
+
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print("  ftype == 0 -> float32")
@@ -58,7 +62,7 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 #   ftype == 0 -> float32
 #   ftype == 1 -> float16
-#
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -67,6 +71,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
         sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -78,29 +83,29 @@ with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
 
 if hparams["architectures"][0] != "GPTNeoXForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0])
 
     sys.exit()
 
 # get number of model parts
 num_parts = count_model_parts(dir_model)
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+llm_arch = "gptneox"
+gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
 
 print("gguf: get model metadata")
 
-llm_arch = "gptneox"
 block_count = hparams["num_hidden_layers"]
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type( "All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
-gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, int( hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"])) )
-gguf_writer.add_head_count(llm_arch, hparams["num_attention_heads"])
-gguf_writer.add_parallel_residual(llm_arch, hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
-gguf_writer.add_layer_norm_eps(llm_arch, hparams["layer_norm_eps"])
+gguf_writer.add_context_length(hparams["max_position_embeddings"])
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"])))
+gguf_writer.add_head_count(hparams["num_attention_heads"])
+gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
+gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"])
 
 # TOKENIZATION
 
@@ -146,8 +151,9 @@ if Path(dir_model + "/tokenizer.json").is_file():
             text.extend(c.encode('utf-8'))
         else:
             print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
-            padding_token = f"[PAD{i}]".encode("utf8")
-            text = bytearray(padding_token)
+            pad_token = f"[PAD{i}]".encode("utf8")
+            text = bytearray(pad_token)
+
         tokens.append(text)
 
 gguf_writer.add_token_list(tokens)
@@ -228,6 +234,7 @@ for part_name in part_names:
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -241,77 +248,21 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
 
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        data = data.astype(data_dtype)
 
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
-# tensor data
-print("gguf: convert and write tensor data")
-
-if num_parts == 0:
-    part_names = ("pytorch_model.bin",)
-else:
-    part_names = (
-        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
-    )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"):
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print( name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_to_file(data)
-
 gguf_writer.close()
 
 
 print("gguf: model successfully exported to '" + fname_out + "'")
 print("")
convert-llama-7b-pth-to-gguf.py
@@ -18,6 +18,7 @@ from sentencepiece import SentencePieceProcessor
 # compatible with python < 3.9
 NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
 
+
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
     for filename in os.listdir(dir_model):
@@ -28,10 +29,12 @@ def count_model_parts(dir_model: str) -> int:
     print("gguf: found " + str(num_parts) + " model parts")
     return num_parts
 
+
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print("  ftype == 0 -> float32")
     print("  ftype == 1 -> float16")
+
     sys.exit(1)
 
 
@@ -43,7 +46,7 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 #   ftype == 0 -> float32
 #   ftype == 1 -> float16
-#
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -52,6 +55,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
        sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -70,14 +74,14 @@ num_parts = count_model_parts(dir_model)
 
 if num_parts > 1:
     print("gguf: Only models with a single datafile are supported.")
-    sys.exit()
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+    sys.exit()
+
+llm_arch = "llama"
+gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
 
 print("gguf: get model metadata")
 
-llm_arch = "llama"
 block_count = hparams["num_hidden_layers"]
 head_count = hparams["num_attention_heads"]
 
@@ -91,19 +95,18 @@ if "_name_or_path" in hparams:
 else:
     hf_repo = ""
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type( "All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
-gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
-gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
-gguf_writer.add_head_count(llm_arch, head_count)
-gguf_writer.add_head_count_kv(llm_arch, head_count_kv)
-gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
+gguf_writer.add_tensor_data_layout("Meta AI original pth")
+gguf_writer.add_context_length(hparams["max_position_embeddings"])
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
@@ -129,15 +132,19 @@ if Path(dir_model + "/tokenizer.model").is_file():
         score = tokenizer.get_score(i)
 
         toktype = 1  # defualt to normal token type
-        if tokenizer.is_unknown(i): toktype = 2
-        if tokenizer.is_control(i): toktype = 3
+        if tokenizer.is_unknown(i):
+            toktype = 2
+        if tokenizer.is_control(i):
+            toktype = 3
 
         # TODO: How to determinate if a token is user defined?
         # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
         # if tokenizer.is_user_defined(i): toktype = 4
 
-        if tokenizer.is_unused(i): toktype = 5
-        if tokenizer.is_byte(i): toktype = 6
+        if tokenizer.is_unused(i):
+            toktype = 5
+        if tokenizer.is_byte(i):
+            toktype = 6
 
         tokens.append(text)
         scores.append(score)
@@ -223,6 +230,7 @@ for part_name in part_names:
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -236,69 +244,19 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
 
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        data = data.astype(data_dtype)
 
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
-# tensor data
-print("gguf: convert and write tensor data")
-
-part_names = ( f"consolidated.{n:02}.pth" for n in range(0, num_parts) )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name == "rope.freqs":
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print( name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_data(data)
-
 gguf_writer.close()
 
convert-llama-hf-to-gguf.py
@@ -18,26 +18,35 @@ NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
 
 # reverse HF permute back to original pth layout
 # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py
 
+
 def reverse_hf_permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
-    if n_kv_head is not None and n_head != n_kv_head: n_head //= n_kv_head
+    if n_kv_head is not None and n_head != n_kv_head:
+        n_head //= n_kv_head
 
     return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
             .swapaxes(1, 2)
             .reshape(weights.shape))
 
 
 def count_model_parts(dir_model: str) -> int:
     num_parts = 0
+
     for filename in os.listdir(dir_model):
         if filename.startswith("pytorch_model-"):
             num_parts += 1
 
     if num_parts > 0:
         print("gguf: found " + str(num_parts) + " model parts")
+
     return num_parts
 
 
 if len(sys.argv) < 3:
     print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
     print("  ftype == 0 -> float32")
     print("  ftype == 1 -> float16")
+
     sys.exit(1)
 
 
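A note on reverse_hf_permute(), reformatted above: the HuggingFace conversion interleaves the two rotary halves of each attention head's weight rows, and this reshape/swapaxes undoes it. A toy self-check, under the assumption that the forward permute is the mirror-image reshape (sizes and helper names here are illustrative, not from the script):

    import numpy as np

    n_head = 2
    w = np.arange(12 * 3, dtype=np.float32).reshape(12, 3)  # 2 heads, head dim 6

    def hf_permute(x):  # assumed forward permutation (mirror of the function above)
        return (x.reshape(n_head, x.shape[0] // n_head // 2, 2, *x.shape[1:])
                .swapaxes(1, 2)
                .reshape(x.shape))

    def reverse_hf_permute(x):  # same reshape/swapaxes as in the diff, n_kv_head case omitted
        return (x.reshape(n_head, 2, x.shape[0] // n_head // 2, *x.shape[1:])
                .swapaxes(1, 2)
                .reshape(x.shape))

    assert (reverse_hf_permute(hf_permute(w)) == w).all()  # round-trips exactly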
@@ -49,7 +58,8 @@ last_dir = os.path.basename(os.path.normpath(dir_model))
 # possible tensor data types
 #   ftype == 0 -> float32
 #   ftype == 1 -> float16
-#
+
+
 # map from ftype to string
 ftype_str = ["f32", "f16"]
 
@@ -58,6 +68,7 @@ if len(sys.argv) > 2:
     ftype = int(sys.argv[2])
     if ftype < 0 or ftype > 1:
         print("Invalid ftype: " + str(ftype))
+
        sys.exit(1)
 
 fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"
@@ -69,17 +80,17 @@ with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
 
 if hparams["architectures"][0] != "LlamaForCausalLM":
     print("Model architecture not supported: " + hparams["architectures"][0])
 
     sys.exit()
 
 # get number of model parts
 num_parts = count_model_parts(dir_model)
 
-gguf_writer = gguf.GGUFWriter.open(fname_out)
+gguf_writer = gguf.GGUFWriter(fname_out, arch="llama")
 
 
 print("gguf: get model metadata")
 
-llm_arch = "llama"
 block_count = hparams["num_hidden_layers"]
 head_count = hparams["num_attention_heads"]
 
@@ -99,22 +110,22 @@ elif "max_position_embeddings" in hparams:
     ctx_length = hparams["max_position_embeddings"]
 else:
     print("gguf: can not find ctx length parameter.")
 
     sys.exit()
 
 
-gguf_writer.add_architecture(llm_arch)
+gguf_writer.add_architecture()
 gguf_writer.add_name(last_dir)
-gguf_writer.add_file_type("All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
-gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
-gguf_writer.add_context_length(llm_arch, ctx_length)
-gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_block_count(llm_arch, block_count)
-gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
-gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
-gguf_writer.add_head_count(llm_arch, head_count)
-gguf_writer.add_head_count_kv(llm_arch, head_count_kv)
-gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
+gguf_writer.add_tensor_data_layout("Meta AI original pth")
+gguf_writer.add_context_length(ctx_length)
+gguf_writer.add_embedding_length(hparams["hidden_size"])
+gguf_writer.add_block_count(block_count)
+gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
+gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
+gguf_writer.add_head_count(head_count)
+gguf_writer.add_head_count_kv(head_count_kv)
+gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
 
 # TOKENIZATION
@@ -140,15 +151,19 @@ if Path(dir_model + "/tokenizer.model").is_file():
         score = tokenizer.get_score(i)
 
         toktype = 1  # defualt to normal token type
-        if tokenizer.is_unknown(i): toktype = 2
-        if tokenizer.is_control(i): toktype = 3
+        if tokenizer.is_unknown(i):
+            toktype = 2
+        if tokenizer.is_control(i):
+            toktype = 3
 
         # TODO: How to determinate if a token is user defined?
         # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
        # if tokenizer.is_user_defined(i): toktype = 4
 
-        if tokenizer.is_unused(i): toktype = 5
-        if tokenizer.is_byte(i): toktype = 6
+        if tokenizer.is_unused(i):
+            toktype = 5
+        if tokenizer.is_byte(i):
+            toktype = 6
 
         tokens.append(text)
         scores.append(score)
@@ -239,10 +254,12 @@ for part_name in part_names:
             name = tensor_map[name[:-5]] + ".bias"
         else:
             print("Can not map tensor '" + name + "'")
+
             sys.exit()
 
         n_dims = len(data.shape)
         data_dtype = data.dtype
+        old_dtype = data_dtype
 
         # if f32 desired, convert any float16 to float32
         if ftype == 0 and data.dtype == np.float16:
@@ -256,78 +273,19 @@ for part_name in part_names:
         if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
             data_dtype = np.float16
 
-        data_nbytes = data.size * 2 if data_dtype == np.float16 else data.size * 4
+        data = data.astype(data_dtype)
 
-        gguf_writer.add_tensor_info(name, data.shape, data_dtype, data_nbytes)
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
 
+        gguf_writer.add_tensor(name, data)
 
 
 print("gguf: write header")
 gguf_writer.write_header_to_file()
 print("gguf: write metadata")
 gguf_writer.write_kv_data_to_file()
-print("gguf: write tensor metadata")
-gguf_writer.write_ti_data_to_file()
+print("gguf: write tensors")
+gguf_writer.write_tensors_to_file()
 
-# tensor data
-print("gguf: convert and write tensor data")
-
-if num_parts == 0:
-    part_names = ("pytorch_model.bin",)
-else:
-    part_names = (
-        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
-    )
-
-for part_name in part_names:
-    print("gguf: loading model part '"+ part_name + "'")
-    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")
-
-    for name in model_part.keys():
-        data = model_part[name]
-
-        old_dtype = data.dtype
-
-        # we don't need these
-        if name.endswith(".rotary_emb.inv_freq"):
-            continue
-
-        # convert any unsupported data types to float32
-        if data.dtype != torch.float16 and data.dtype != torch.float32:
-            data = data.to(torch.float32)
-
-        data = data.squeeze().numpy()
-
-        # reverse permute these
-        if name.endswith(".q_proj.weight") or name.endswith(".k_proj.weight"):
-            data = reverse_hf_permute(data, head_count, head_count_kv)
-
-        # map tensor names
-        if name.endswith(".weight") and name[:-7] in tensor_map:
-            name = tensor_map[name[:-7]] + ".weight"
-        elif name.endswith(".bias") and name[:-5] in tensor_map:
-            name = tensor_map[name[:-5]] + ".bias"
-        else:
-            print( "Can not map tensor '" + name + "'" )
-            sys.exit()
-
-        n_dims = len(data.shape)
-        data_dtype = data.dtype
-
-        # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data = data.astype(np.float32)
-
-        # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
-            data = data.astype(np.float32)
-
-        # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data = data.astype(np.float16)
-
-        print(name + ", shape " + str(len(data.shape)) + ", " + str(old_dtype) + " --> " + str(data.dtype))
-
-        gguf_writer.write_tensor_to_file(data)
-
 gguf_writer.close()
 
gguf.py (61 changed lines)
@@ -1,11 +1,7 @@
-"""TODOs
-1. Implement writers for known architectures, LLaMA in particular.
-2. Add docstrings from the format specs.
-3. After development is done, Convert it to a proper pip-installable Python package, and possibly move it to its own repo under ggml-org.
-"""
-
+import shutil
 import sys
 import struct
+import tempfile
 import numpy as np
 
 from enum import IntEnum, auto
@@ -27,7 +23,6 @@ KEY_GENERAL_NAME = "general.name"
 KEY_GENERAL_AUTHOR = "general.author"
 KEY_GENERAL_URL = "general.url"
 KEY_GENERAL_DESCRIPTION = "general.description"
-KEY_GENERAL_FILE_TYPE = "general.file_type"
 KEY_GENERAL_LICENSE = "general.license"
 KEY_GENERAL_SOURCE_URL = "general.source.url"
 KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
@@ -70,6 +65,7 @@ KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
 # recommended mapping of model tensor names for storage in gguf
 #
 
+
 class MODEL_ARCH(IntEnum):
     LLAMA = auto()
     FALCON = auto()
@@ -78,6 +74,7 @@ class MODEL_ARCH(IntEnum):
     GPTNEOX = auto()
     MPT = auto()
 
+
 class MODEL_TENSOR(IntEnum):
     TOKEN_EMBD = auto()
     POS_EMBD = auto()
@@ -97,6 +94,7 @@ class MODEL_TENSOR(IntEnum):
     FFN_UP = auto()
     FFN_NORM = auto()
 
+
 MODEL_ARCH_NAMES = {
     MODEL_ARCH.LLAMA: "llama",
     MODEL_ARCH.FALCON: "falcon",
@@ -148,6 +146,7 @@ MODEL_TENSOR_SKIP = {
     ],
 }
 
+
 # TODO: the following helper functions should be removed
 # instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR)
 # however, my Python is very bad, and I couldn't figure out how to do this, hence these functions
@@ -160,6 +159,7 @@ def should_skip_tensor_TMP(arch : MODEL_ARCH, n_blocks : int, name : str) -> bool:
 
     return False
 
+
 def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
     tensor_map = {}
 
@@ -312,6 +312,7 @@ def get_tensor_name_map(arch : MODEL_ARCH, n_blocks : int) -> dict:
 # implementation
 #
 
+
 class GGMLQuantizationType(IntEnum):
     F32 = 0
     F16 = 1
@@ -481,6 +482,19 @@ class GGUFWriter:
         self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
         self.ti_data_count += 1
 
+    def add_tensor(self, name: str, tensor: np.ndarray):
+        if not hasattr(self, "temp_file"):
+            self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024)
+            self.temp_file.seek(0)
+
+        self.add_tensor_info(name, tensor.shape, tensor.dtype, tensor.nbytes)
+
+        tensor.tofile(self.temp_file)
+
+        pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes
+        if pad != 0:
+            self.temp_file.write(bytes([0] * pad))
+
     def write_tensor_data(self, tensor: np.ndarray):
         pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
         if pad != 0:
@@ -492,6 +506,19 @@ class GGUFWriter:
         if pad != 0:
             self.fout.write(bytes([0] * pad))
 
+    def write_tensors_to_file(self):
+        self.write_ti_data_to_file()
+
+        pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
+        if pad != 0:
+            self.fout.write(bytes([0] * pad))
+
+        self.temp_file.seek(0)
+
+        shutil.copyfileobj(self.temp_file, self.fout)
+        self.flush()
+        self.temp_file.close()
+
     def flush(self):
         self.fout.flush()
 
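Design note on the two methods added above: add_tensor() stages raw tensor bytes in a tempfile.SpooledTemporaryFile capped at 256 MiB, so small models are buffered entirely in memory while larger ones spill to disk transparently; write_tensors_to_file() then rewinds that buffer and streams it out after the tensor metadata with shutil.copyfileobj(). A standalone sketch of just that buffering mechanism (output file name and sizes are illustrative):

    import shutil
    import tempfile

    buf = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256 * 1024 * 1024)
    buf.write(b"\x00" * 4096)  # stays in memory while under max_size
    buf.seek(0)                # rewind before streaming out

    with open("out.bin", "wb") as fout:
        shutil.copyfileobj(buf, fout)  # copy the staged bytes into the real file
    buf.close()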
@@ -513,9 +540,6 @@ class GGUFWriter:
     def add_description(self, description: str):
         self.add_string(KEY_GENERAL_DESCRIPTION, description)
 
-    def add_file_type(self, file_type: str):
-        self.add_string(KEY_GENERAL_FILE_TYPE, file_type)
-
     def add_source_url(self, url: str):
         self.add_string(KEY_GENERAL_SOURCE_URL, url)
 
@@ -618,23 +642,28 @@ class GGUFWriter:
     def add_pad_token_id(self, id: int):
         self.add_uint32(KEY_TOKENIZER_PAD_ID, id)
 
 
 # Example usage:
 if __name__ == "__main__":
     # Example usage with a file
     gguf_writer = GGUFWriter("example.gguf", "llama")
 
+    gguf_writer.add_architecture()
+    gguf_writer.add_block_count(12)
     gguf_writer.add_uint32("answer", 42)  # Write a 32-bit integer
     gguf_writer.add_float32("answer_in_float", 42.0)  # Write a 32-bit float
     gguf_writer.add_custom_alignment(64)
 
     tensor1 = np.ones((32,), dtype=np.float32) * 100.0
-    tensor2 = np.ones((32,), dtype=np.float32) * 101.0
-    gguf_writer.add_tensor_info("tensor0", tensor1)
-    gguf_writer.add_tensor_info("tensor1", tensor2)
+    tensor2 = np.ones((64,), dtype=np.float32) * 101.0
+    tensor3 = np.ones((96,), dtype=np.float32) * 102.0
+
+    gguf_writer.add_tensor("tensor1", tensor1)
+    gguf_writer.add_tensor("tensor2", tensor2)
+    gguf_writer.add_tensor("tensor3", tensor3)
 
     gguf_writer.write_header_to_file()
     gguf_writer.write_kv_data_to_file()
-    gguf_writer.write_ti_data_to_file()
-    gguf_writer.write_tensor_data(tensor1)
-    gguf_writer.write_tensor_data(tensor2)
+    gguf_writer.write_tensors_to_file()
 
     gguf_writer.close()