mirror of https://github.com/ggerganov/llama.cpp.git
convert_hf : identify more added control tokens for SPM tokenizers
This makes Gemma and Gemma-2 tokenize pretty much EVERYTHING correctly, including HTML tags and consecutive spaces, but it unfortunately requires model re-conversion.

There seems to be a weird behavior of the HF tokenizer for Gemma: it prefers to use the 16-space token over longer space tokens, while the SentencePiece tokenizer does not do this. (The implementation in llama.cpp has the same behavior as SentencePiece.)

* llama : fix wrong pre-tokenization of byte tokens
commit f9d42c598b
parent 6e351e0425
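The space-handling difference described above can be checked directly. The sketch below is illustrative only and is not part of this commit: it tokenizes a run of spaces with the Hugging Face tokenizer and with the raw SentencePiece model, so the preference for the 16-space piece becomes visible. The model id and the tokenizer.model path are placeholders, and the Gemma repositories are gated.

# Illustrative sketch, not part of the commit: compare HF vs. SentencePiece on runs of spaces.
from transformers import AutoTokenizer
import sentencepiece as spm

text = "<b>" + " " * 20 + "</b>"

hf_tok = AutoTokenizer.from_pretrained("google/gemma-2-9b")    # placeholder model id
sp = spm.SentencePieceProcessor(model_file="tokenizer.model")  # tokenizer.model from the same repo

print(hf_tok.tokenize(text))          # HF tends to pick the 16-space piece first
print(sp.encode(text, out_type=str))  # SentencePiece (and llama.cpp) segments the spaces differently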
@@ -373,17 +373,28 @@ class Model:
         except KeyError:
             raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
 
-    def does_token_look_special(self, token: str) -> bool:
+    def does_token_look_special(self, token: str | bytes) -> bool:
+        if isinstance(token, (bytes, bytearray)):
+            token_text = token.decode(encoding="utf-8")
+        elif isinstance(token, memoryview):
+            token_text = token.tobytes().decode(encoding="utf-8")
+        else:
+            token_text = token
+
         # Some models mark some added tokens which ought to be control tokens as not special.
         # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
-        is_known_special = token in (
+        seems_special = token_text in (
             "<pad>",  # deepseek-coder
             "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
         )
-        # TODO: should these be marked as UNUSED instead?
-        is_known_special = is_known_special or (token.startswith("<unused") and token.endswith(">"))  # gemma{,-2}
 
-        return is_known_special or (token.startswith(("<|", "<｜")) and token.endswith(("|>", "｜>")))
+        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
+        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder
+
+        # TODO: should these be marked as UNUSED instead? (maybe not)
+        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}
+
+        return seems_special
 
     # used for GPT-2 BPE and WordPiece vocabs
     def get_vocab_base(self) -> tuple[list[str], list[int], str]:
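To make the intent of the new helper concrete, here is a small standalone sketch (hypothetical, mirroring the checks above rather than calling the method itself) showing which token strings it would flag:

# Simplified stand-in for Model.does_token_look_special, for illustration only.
def looks_special(token_text: str) -> bool:
    special = token_text in ("<pad>", "<mask>", "<2mass>", "[@BOS@]")
    special = special or (token_text.startswith("<|") and token_text.endswith("|>"))
    special = special or (token_text.startswith("<unused") and token_text.endswith(">"))
    return special

print(looks_special("<|im_start|>"))  # True
print(looks_special("<unused42>"))    # True
print(looks_special("<html>"))        # False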
@@ -403,17 +414,18 @@ class Model:
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
-            elif reverse_vocab[i] in added_vocab:
+                toktypes.append(gguf.TokenType.UNUSED)
+            else:
                 token: str = reverse_vocab[i]
-                tokens.append(token)
+                if token in added_vocab:
                     if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
                         toktypes.append(gguf.TokenType.CONTROL)
                     else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
                         toktypes.append(gguf.TokenType.USER_DEFINED)
                 else:
-                tokens.append(reverse_vocab[i])
                     toktypes.append(gguf.TokenType.NORMAL)
+                tokens.append(token)
 
         return tokens, toktypes, tokpre
 
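The byte string b"\xe2\x96\x81" in the new branch is the UTF-8 encoding of U+2581 ("▁"), the marker that SentencePiece-style vocabs use in place of a space. A quick illustration (not from the commit) of the pre-normalization it performs on user-defined tokens:

# Illustration only: the byte sequence being replaced is the SentencePiece space marker.
marker = b"\xe2\x96\x81".decode("utf-8")
print(repr(marker))                          # '▁' (U+2581)
print(repr("▁▁<br>".replace(marker, " ")))   # '  <br>' -- user-defined tokens get real spaces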
@@ -572,7 +584,7 @@ class Model:
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 toktypes.append(gguf.TokenType.CONTROL)
@@ -657,6 +669,25 @@ class Model:
                     scores[token_id] = -1000.0
                     toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
+                for token_id, token_data in added_tokens_decoder.items():
+                    token_id = int(token_id)
+                    token: str = token_data["content"]
+                    if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                        assert tokens[token_id] == token.encode("utf-8")
+                    if token_data.get("special") or self.does_token_look_special(token):
+                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
+                    else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
+                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
+
+                    scores[token_id] = -1000.0
+                    tokens[token_id] = token.encode("utf-8")
+
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
             logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
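For context, the added_tokens_decoder section that this new code reads from tokenizer_config.json maps string token ids to per-token metadata. The snippet below shows the general shape with made-up values (the id and fields are illustrative, not taken from any specific model):

# Illustrative shape of tokenizer_config.json's "added_tokens_decoder"; values are made up.
added_tokens_decoder = {
    "106": {"content": "<start_of_turn>", "normalized": False, "special": True},
}
for token_id, token_data in added_tokens_decoder.items():
    token_id = int(token_id)                 # JSON keys are strings
    content: str = token_data["content"]
    is_control = bool(token_data.get("special"))
    print(token_id, content, is_control)     # 106 <start_of_turn> True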
@@ -1280,7 +1311,7 @@ class StableLMModel(Model):
         if (self.dir_model / "tokenizer.json").is_file():
             self._set_vocab_gpt2()
         else:
-            # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
+            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
             self._set_vocab_qwen()
 
     def set_gguf_parameters(self):
@@ -1592,7 +1623,6 @@ class DbrxModel(Model):
         self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
 
         self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
-        self.gguf_writer.add_file_type(self.ftype)
 
         self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
         self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
@@ -2412,19 +2442,7 @@ class Gemma2Model(Model):
     model_arch = gguf.MODEL_ARCH.GEMMA2
 
     def set_vocab(self):
-        tokens, scores, toktypes = self._create_vocab_sentencepiece()
-        # hack: This is required so that we can properly use start/end-of-turn for chat template
-        for i in range(108):
-            # including <unusedX>, <start_of_turn>, <end_of_turn>
-            toktypes[i] = SentencePieceTokenTypes.CONTROL
-        self.gguf_writer.add_tokenizer_model("llama")
-        self.gguf_writer.add_tokenizer_pre("default")
-        self.gguf_writer.add_token_list(tokens)
-        self.gguf_writer.add_token_scores(scores)
-        self.gguf_writer.add_token_types(toktypes)
-
-        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
-        special_vocab.add_to_gguf(self.gguf_writer)
+        self._set_vocab_sentencepiece()
 
         self.gguf_writer.add_add_space_prefix(False)
 
@@ -3318,7 +3336,7 @@ class ChatGLMModel(Model):
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 if tokenizer.added_tokens_decoder[i].special:
@@ -5640,7 +5640,7 @@ static void llm_load_vocab(
     // build special tokens cache
     {
         for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
+            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
                 vocab.cache_special_tokens.push_back(id);
             }
         }
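The llama.cpp hunk above appears to correspond to the "fix wrong pre-tokenization of byte tokens" bullet in the commit message: previously every token whose attributes were not NORMAL was cached as special, which also swept byte tokens into the special-token partitioning, whereas now only CONTROL and USER_DEFINED tokens are cached. A rough Python rendering of the two conditions (the flag values are hypothetical, for illustration only):

# Illustration only: hypothetical flag values, mirroring the old vs. new cache condition.
ATTR_NORMAL, ATTR_CONTROL, ATTR_USER_DEFINED, ATTR_BYTE = 1, 2, 4, 8

attr = ATTR_BYTE                                              # a byte token such as "<0x0A>"
old_cached = not (attr & ATTR_NORMAL)                         # True: byte tokens were treated as special
new_cached = bool(attr & (ATTR_CONTROL | ATTR_USER_DEFINED))  # False: they no longer are
print(old_cached, new_cached)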
@@ -20,7 +20,7 @@ from typing import Any, Iterator, cast
 from typing_extensions import Buffer
 
 import cffi
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, PreTrainedTokenizer
 
 
 logger = logging.getLogger("test-tokenizer-random")
@@ -129,7 +129,7 @@ class Tokenizer:
 class TokenizerGroundtruth (Tokenizer):
 
     def __init__(self, dir_tokenizer: str):
-        self.model = AutoTokenizer.from_pretrained(dir_tokenizer)
+        self.model: PreTrainedTokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
         # guess BOS and EOS
         ids = self.encode("a")
         assert 1 <= len(ids) <= 3
@@ -143,7 +143,7 @@ class TokenizerGroundtruth (Tokenizer):
         self.vocab = list(sorted(self.vocab))
         # tokens and lists
         self.special_tokens = list(self.model.all_special_tokens)
-        self.added_tokens = list(self.model.added_tokens_encoder)
+        self.added_tokens = self.model.batch_decode(self.model.added_tokens_encoder.values(), skip_special_tokens=False)
         self.bos_token = self.model.bos_token
         self.eos_token = self.model.eos_token
 
@@ -458,8 +458,8 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
             i = find_first_mismatch(ids1, ids2)
             ids1 = list(ids1)[max(0, i - 2) : i + 5 + 1]
             ids2 = list(ids2)[max(0, i - 2) : i + 5 + 1]
-            logger.error(" Expected: " + str(ids1))
-            logger.error(" Result: " + str(ids2))
+            logger.error(" Expected: " + str(ids1) + f" {[tokenizer1.decode([id]) for id in ids1]}")
+            logger.error(" Result: " + str(ids2) + f" {[tokenizer2.decode([id]) for id in ids2]}")
             encode_errors += 1
             logger.error(f" {encode_errors=}")
         if decode_errors < MAX_ERRORS and not check_detokenizer(text, text1, text2):