From b668cd32960f32dc0c1481b44da6e1d15b046814 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Thu, 17 Aug 2023 21:42:26 +0200
Subject: [PATCH] convert-gptneox-hf-to-gguf.py : fixes

---
 convert-gptneox-hf-to-gguf.py | 27 +++++++++++++--------------
 1 file changed, 13 insertions(+), 14 deletions(-)

diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index 11cf19282..1704b1f41 100644
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -89,8 +89,8 @@ if hparams["architectures"][0] != "GPTNeoXForCausalLM":
 # get number of model parts
 num_parts = count_model_parts(dir_model)
 
-llm_arch = "gptneox"
-gguf_writer = gguf.GGUFWriter(fname_out, arch=llm_arch)
+ARCH=gguf.MODEL_ARCH.GPTNEOX
+gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
 
 print("gguf: get model metadata")
 
@@ -194,7 +194,7 @@ if Path(dir_model + "/tokenizer.json").is_file():
 
 # TENSORS
 
-tensor_map = gguf.get_tensor_name_map(block_count)
+tensor_map = gguf.get_tensor_name_map(ARCH,block_count)
 
 # tensor info
 print("gguf: get tensor metadata")
@@ -217,6 +217,8 @@ for part_name in part_names:
         if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"):
             continue
 
+        old_dtype = data.dtype
+
         # convert any unsupported data types to float32
         if data.dtype != torch.float16 and data.dtype != torch.float32:
             data = data.to(torch.float32)
@@ -233,24 +235,21 @@ for part_name in part_names:
             sys.exit()
 
         n_dims = len(data.shape)
-        data_dtype = data.dtype
-        old_dtype = data_dtype
+        data_dtype = data.dtype
 
         # if f32 desired, convert any float16 to float32
-        if ftype == 0 and data.dtype == np.float16:
-            data_dtype = np.float32
+        if ftype == 0 and data_dtype == np.float16:
+            data = data.astype(np.float32)
 
         # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
-        if ftype == 1 and data.dtype == np.float16 and n_dims == 1:
-            data_dtype = np.float32
+        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+            data = data.astype(np.float32)
 
         # if f16 desired, convert any float32 2-dim weight tensors to float16
-        if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
-            data_dtype = np.float16
+        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+            data = data.astype(np.float16)
 
-        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data_dtype))
-
-        data = data.astype(data_dtype)
+        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
 
         gguf_writer.add_tensor(name, data)
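
Note on the tensor-conversion hunks above: old_dtype is now captured before the torch-side conversion (so the log line reports the tensor's true original type), and casts are applied eagerly with data.astype(...) instead of deferring to a single trailing data.astype(data_dtype). A minimal standalone sketch of the resulting dtype policy, assuming the script's convention that ftype 0 means f32 output and 1 means f16 output; the helper name convert_dtype is hypothetical, not part of the script:

    import numpy as np

    def convert_dtype(name: str, data: np.ndarray, ftype: int) -> np.ndarray:
        # Sketch (not the script itself) of the post-patch conversion policy.
        n_dims = data.ndim
        data_dtype = data.dtype

        # if f32 output is desired, upcast any float16 tensor
        if ftype == 0 and data_dtype == np.float16:
            data = data.astype(np.float32)

        # 1-dim tensors stay float32 even for f16 output (see the TODO in the patch)
        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
            data = data.astype(np.float32)

        # if f16 output is desired, downcast float32 2-dim weight tensors
        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
            data = data.astype(np.float16)

        return data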