From dc1f0510134ba743d5a87bf8c62c23023e1f44f7 Mon Sep 17 00:00:00 2001
From: klosax <131523366+klosax@users.noreply.github.com>
Date: Mon, 21 Aug 2023 13:27:53 +0200
Subject: [PATCH] convert-llama-7b-pth-to-gguf.py : rope scale and added tokens

---
 convert-llama-7b-pth-to-gguf.py | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/convert-llama-7b-pth-to-gguf.py b/convert-llama-7b-pth-to-gguf.py
index 77edd026c..9e2f2099e 100644
--- a/convert-llama-7b-pth-to-gguf.py
+++ b/convert-llama-7b-pth-to-gguf.py
@@ -118,6 +118,11 @@ gguf_writer.add_head_count(head_count)
 gguf_writer.add_head_count_kv(head_count_kv)
 gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])
 
+if "rope_scaling" in hparams and "factor" in hparams["rope_scaling"]:
+    if "type" in hparams["rope_scaling"]:
+        if hparams["rope_scaling"]["type"] == "linear":
+            gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])
+
 
 # TOKENIZATION
 
@@ -147,9 +152,7 @@ if Path(dir_model + "/tokenizer.model").is_file():
         if tokenizer.is_control(i):
             toktype = 3
 
-        # TODO: How to determinate if a token is user defined?
-        # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
-        # if tokenizer.is_user_defined(i): toktype = 4
+        # toktype = 4 is user-defined = tokens from added_tokens.json
 
         if tokenizer.is_unused(i):
             toktype = 5
@@ -160,6 +163,17 @@ if Path(dir_model + "/tokenizer.model").is_file():
         scores.append(score)
         toktypes.append(toktype)
 
+    if Path(dir_model + "/added_tokens.json").is_file():
+        with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f:
+            addtokens_json = json.load(f)
+
+            print("gguf: get added tokens")
+
+            for key in addtokens_json:
+                tokens.append( key.encode("utf-8") )
+                scores.append(-1000.0)
+                toktypes.append(4) # user-defined token type
+
 
 gguf_writer.add_tokenizer_model("llama")
 gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
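
For readers who want to exercise the added logic outside the converter, below is a minimal standalone sketch of what the patch does: read rope_scaling from a HuggingFace-style config.json and, if added_tokens.json exists, collect its keys as user-defined tokens. The dir_model path and the print statement are illustrative assumptions; in the actual script the values go to gguf_writer rather than being printed.

    # Sketch only: assumes a HuggingFace-style model directory with config.json
    # and optionally added_tokens.json; paths and printouts are hypothetical.
    import json
    from pathlib import Path

    dir_model = Path("models/llama-7b")  # hypothetical model directory

    # rope scaling: only a linear scaling factor is recorded, as in the patch
    hparams = json.loads((dir_model / "config.json").read_text(encoding="utf-8"))
    rope_scaling = hparams.get("rope_scaling") or {}
    if rope_scaling.get("type") == "linear" and "factor" in rope_scaling:
        print("rope scale linear:", rope_scaling["factor"])

    # added tokens: each key in added_tokens.json becomes a user-defined token
    # (toktype 4) with the fixed -1000.0 score used by the patch
    tokens, scores, toktypes = [], [], []
    added_tokens_path = dir_model / "added_tokens.json"
    if added_tokens_path.is_file():
        added = json.loads(added_tokens_path.read_text(encoding="utf-8"))
        print("gguf: get added tokens")
        for key in added:
            tokens.append(key.encode("utf-8"))
            scores.append(-1000.0)
            toktypes.append(4)  # user-defined token type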