Repository: https://github.com/ggerganov/llama.cpp.git
convert-llama-7b-pth-to-gguf.py : add token types
commit ab2cbd03ca (parent cedb4870c6)
@@ -110,8 +110,9 @@ gguf_writer.add_layer_norm_rms_eps(llm_arch, hparams["rms_norm_eps"])
 
 print("gguf: get tokenizer metadata")
 
-tokens: List[str] = []
+tokens: List[bytes] = []
 scores: List[float] = []
+toktypes: List[int] = []
 
 if Path(dir_model + "/tokenizer.model").is_file():
     # vocab type sentencepiece
@@ -121,26 +122,31 @@ if Path(dir_model + "/tokenizer.model").is_file():
 
     for i in range(tokenizer.vocab_size()):
         text: bytes
-        if tokenizer.is_unknown(i):
-            text = " \u2047 ".encode("utf-8")
-        elif tokenizer.is_control(i):
-            text = b""
-        if tokenizer.is_byte(i):
-            piece = tokenizer.id_to_piece(i)
-            if len(piece) != 6:
-                raise Exception(f"Invalid token: {piece}")
-            byte_value = int(piece[3:-1], 16)
-            text = struct.pack("B", byte_value)
-        else:
-            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
-        score: float = tokenizer.get_score(i)
+        score: float
+
+        piece = tokenizer.id_to_piece(i)
+        text = piece.encode("utf-8")
+        score = tokenizer.get_score(i)
+
+        toktype = 1  # default to normal token type
+        if tokenizer.is_unknown(i): toktype = 2
+        if tokenizer.is_control(i): toktype = 3
+
+        # TODO: How to determine if a token is user defined?
+        # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
+        # if tokenizer.is_user_defined(i): toktype = 4
+
+        if tokenizer.is_unused(i): toktype = 5
+        if tokenizer.is_byte(i): toktype = 6
 
         tokens.append(text)
         scores.append(score)
+        toktypes.append(toktype)
 
 gguf_writer.add_tokenizer_model("llama")
 gguf_writer.add_token_list(tokens)
 gguf_writer.add_token_scores(scores)
+gguf_writer.add_token_types(toktypes)
 
 if Path(dir_model + "/tokenizer.json").is_file():
     with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
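For reference, the integer values assigned above line up one-to-one with named token types. Below is a minimal sketch that names them as a Python IntEnum; the enum and its member names are added here for illustration and are not part of the conversion script:

from enum import IntEnum

class TokenType(IntEnum):
    NORMAL       = 1  # default for ordinary vocabulary pieces
    UNKNOWN      = 2  # tokenizer.is_unknown(i)
    CONTROL      = 3  # tokenizer.is_control(i)
    USER_DEFINED = 4  # not yet detected by the script; see the TODO in the diff
    UNUSED       = 5  # tokenizer.is_unused(i)
    BYTE         = 6  # tokenizer.is_byte(i)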
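The deleted branch also shows what the old byte-token handling did: a SentencePiece piece such as "<0x0A>" was decoded into its raw byte value at conversion time, while the new code stores the piece text verbatim and tags it with type 6. A minimal sketch of both behaviors for that example piece (variable names are illustrative):

import struct

piece = "<0x0A>"                          # a SentencePiece byte token
assert len(piece) == 6                    # the old code rejected other lengths

# old path (removed): decode to the raw byte during conversion
byte_value = int(piece[3:-1], 16)         # "0A" -> 10
old_text = struct.pack("B", byte_value)   # b"\n"

# new path: keep the piece verbatim and record its token type instead
new_text = piece.encode("utf-8")          # b"<0x0A>"
toktype = 6                               # byte token

Keeping the piece unmodified makes the written vocabulary lossless; interpreting byte tokens is left to whatever later consumes the GGUF file.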