Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 20:04:35 +00:00)

convert-gptneox-hf-to-gguf.py : fixes

commit b668cd3296 (parent fc3a523211)
@@ -89,8 +89,8 @@ if hparams["architectures"][0] != "GPTNeoXForCausalLM":
 # get number of model parts
 num_parts = count_model_parts(dir_model)

-llm_arch = "gptneox"
-gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
+ARCH=gguf.MODEL_ARCH.GPTNEOX
+gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

 print("gguf: get model metadata")

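Note: replacing the bare string with the gguf.MODEL_ARCH enum means the architecture name is defined once, in the gguf package's shared table, rather than retyped in every converter. A minimal sketch of the pattern, assuming the bundled gguf Python module exposes MODEL_ARCH, MODEL_ARCH_NAMES and GGUFWriter as used in the hunk above ("model.gguf" is a placeholder path):

import gguf

# Enum member instead of the old string literal "gptneox"
ARCH = gguf.MODEL_ARCH.GPTNEOX

# The canonical name string comes from the shared lookup table,
# so every converter spells the architecture identically.
print(gguf.MODEL_ARCH_NAMES[ARCH])  # -> "gptneox"

# The writer receives the name string resolved from the enum.
gguf_writer = gguf.GGUFWriter("model.gguf", gguf.MODEL_ARCH_NAMES[ARCH])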
@@ -194,7 +194,7 @@ if Path(dir_model + "/tokenizer.json").is_file():

 # TENSORS

-tensor_map = gguf.get_tensor_name_map(block_count)
+tensor_map = gguf.get_tensor_name_map(ARCH,block_count)

 # tensor info
 print("gguf: get tensor metadata")
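Note: get_tensor_name_map now takes the architecture because the HF-to-GGUF tensor-name tables differ per model family; block_count alone no longer selects the right table. A sketch of how converter scripts of this vintage consume such a map, treating it as a lookup keyed by the HF tensor name minus its ".weight"/".bias" suffix (map_tensor_name is a hypothetical helper, illustrative rather than lifted from this revision):

import sys
import gguf

def map_tensor_name(tensor_map, name: str) -> str:
    # Strip the suffix, translate the base HF name, re-attach the suffix.
    if name.endswith(".weight") and name[:-7] in tensor_map:
        return tensor_map[name[:-7]] + ".weight"
    if name.endswith(".bias") and name[:-5] in tensor_map:
        return tensor_map[name[:-5]] + ".bias"
    print("Can not map tensor '" + name + "'")
    sys.exit()

# e.g. block_count taken from the model's config.json
tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.GPTNEOX, 44)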
@@ -217,6 +217,8 @@ for part_name in part_names:
     if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"):
         continue

+    old_dtype = data.dtype
+
     # convert any unsupported data types to float32
     if data.dtype != torch.float16 and data.dtype != torch.float32:
         data = data.to(torch.float32)
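Note: old_dtype is captured before the unsupported-dtype fallback on purpose: taken any later (as the removed line in the next hunk did), a bfloat16 tensor would already read float32 and the logged transition would be wrong. A small self-contained illustration:

import torch

data = torch.zeros(4, dtype=torch.bfloat16)  # e.g. a bf16 checkpoint tensor
old_dtype = data.dtype                       # capture BEFORE any conversion

# bf16 and other unsupported dtypes fall back to f32
if data.dtype != torch.float16 and data.dtype != torch.float32:
    data = data.to(torch.float32)

print(old_dtype, "-->", data.dtype)          # torch.bfloat16 --> torch.float32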
@@ -233,24 +235,21 @@ for part_name in part_names:
         sys.exit()

     n_dims = len(data.shape)
     data_dtype = data.dtype
-    old_dtype = data_dtype

     # if f32 desired, convert any float16 to float32
-    if ftype == 0 and data.dtype == np.float16:
-        data_dtype = np.float32
+    if ftype == 0 and data_dtype == np.float16:
+        data = data.astype(np.float32)

     # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-    if ftype == 1 and data.dtype == np.float16 and n_dims == 1:
-        data_dtype = np.float32
+    if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+        data = data.astype(np.float32)

     # if f16 desired, convert any float32 2-dim weight tensors to float16
-    if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-        data_dtype = np.float16
+    if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+        data = data.astype(np.float16)

-    print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
+    print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))

-    data = data.astype(data_dtype)
-
     gguf_writer.add_tensor(name, data)

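Note: the net effect of this hunk is that each rule now converts immediately via astype against data_dtype (the dtype recorded on entry to the block), instead of staging a target dtype and applying one trailing astype; that is why both the old_dtype alias and the final data = data.astype(data_dtype) line disappear. Collapsed into a hypothetical helper for readability (a restatement of the new logic, not code from the script):

import numpy as np

def convert_for_ftype(name: str, data: np.ndarray, ftype: int) -> np.ndarray:
    # ftype == 0 -> f32 output file, ftype == 1 -> f16 output file
    n_dims = len(data.shape)
    data_dtype = data.dtype

    # if f32 desired, convert any float16 to float32
    if ftype == 0 and data_dtype == np.float16:
        data = data.astype(np.float32)

    # 1-D f16 tensors are widened to f32 (see the TODO in the diff)
    if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
        data = data.astype(np.float32)

    # if f16 desired, convert any float32 2-dim weight tensors to float16
    if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
        data = data.astype(np.float16)

    return data

Because data_dtype is read once up front and never updated, a tensor converted by one rule cannot be re-matched by a later rule in the same pass, which mirrors how the updated script behaves.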