diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 30f87a9fe..da6d2ba9e 100755
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -373,17 +373,28 @@ class Model:
         except KeyError:
             raise NotImplementedError(f'Architecture {arch!r} not supported!') from None
 
-    def does_token_look_special(self, token: str) -> bool:
+    def does_token_look_special(self, token: str | bytes) -> bool:
+        if isinstance(token, (bytes, bytearray)):
+            token_text = token.decode(encoding="utf-8")
+        elif isinstance(token, memoryview):
+            token_text = token.tobytes().decode(encoding="utf-8")
+        else:
+            token_text = token
+
         # Some models mark some added tokens which ought to be control tokens as not special.
         # (e.g. command-r, command-r-plus, deepseek-coder, gemma{,-2})
-        is_known_special = token in (
+        seems_special = token_text in (
             "<pad>",  # deepseek-coder
             "<mask>", "<2mass>", "[@BOS@]",  # gemma{,-2}
         )
-        # TODO: should these be marked as UNUSED instead?
-        is_known_special = is_known_special or (token.startswith("<unused") and token.endswith(">"))  # gemma{,-2}
-        return is_known_special or (token.startswith(("<|", "<｜")) and token.endswith(("|>", "｜>")))
+
+        seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>"))
+        seems_special = seems_special or (token_text.startswith("<｜") and token_text.endswith("｜>"))  # deepseek-coder
+
+        # TODO: should these be marked as UNUSED instead? (maybe not)
+        seems_special = seems_special or (token_text.startswith("<unused") and token_text.endswith(">"))  # gemma{,-2}
+
+        return seems_special
 
     # used for GPT-2 BPE and WordPiece vocabs
     def get_vocab_base(self) -> tuple[list[str], list[int], str]:
@@ -403,17 +414,18 @@ class Model:
 
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
-            elif reverse_vocab[i] in added_vocab:
-                token: str = reverse_vocab[i]
-                tokens.append(token)
-                if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
-                    toktypes.append(gguf.TokenType.CONTROL)
-                else:
-                    toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             else:
-                tokens.append(reverse_vocab[i])
-                toktypes.append(gguf.TokenType.NORMAL)
+                token: str = reverse_vocab[i]
+                if token in added_vocab:
+                    if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token):
+                        toktypes.append(gguf.TokenType.CONTROL)
+                    else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
+                        toktypes.append(gguf.TokenType.USER_DEFINED)
+                else:
+                    toktypes.append(gguf.TokenType.NORMAL)
+                tokens.append(token)
 
         return tokens, toktypes, tokpre
 
@@ -572,7 +584,7 @@ class Model:
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 toktypes.append(gguf.TokenType.CONTROL)
@@ -657,6 +669,25 @@ class Model:
                 scores[token_id] = -1000.0
                 toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
 
+        tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
+        if tokenizer_config_file.is_file():
+            with open(tokenizer_config_file, "r", encoding="utf-8") as f:
+                tokenizer_config_json = json.load(f)
+                added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
+                for token_id, token_data in added_tokens_decoder.items():
+                    token_id = int(token_id)
+                    token: str = token_data["content"]
+                    if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
+                        assert tokens[token_id] == token.encode("utf-8")
+                    if token_data.get("special") or self.does_token_look_special(token):
+                        toktypes[token_id] = SentencePieceTokenTypes.CONTROL
+                    else:
+                        token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ")  # pre-normalize user-defined spaces
+                        toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
+
+                    scores[token_id] = -1000.0
+                    tokens[token_id] = token.encode("utf-8")
+
         if vocab_size > len(tokens):
             pad_count = vocab_size - len(tokens)
             logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]")
@@ -1280,7 +1311,7 @@ class StableLMModel(Model):
         if (self.dir_model / "tokenizer.json").is_file():
             self._set_vocab_gpt2()
         else:
-            # StableLM 2 1.6B uses a vocab in a similar format to Qwen's vocab
+            # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab
             self._set_vocab_qwen()
 
     def set_gguf_parameters(self):
@@ -1592,7 +1623,6 @@ class DbrxModel(Model):
         self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"])
 
         self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"])
-        self.gguf_writer.add_file_type(self.ftype)
 
         self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"])
         self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"])
@@ -2412,19 +2442,7 @@ class Gemma2Model(Model):
     model_arch = gguf.MODEL_ARCH.GEMMA2
 
     def set_vocab(self):
-        tokens, scores, toktypes = self._create_vocab_sentencepiece()
-        # hack: This is required so that we can properly use start/end-of-turn for chat template
-        for i in range(108):
-            # including <unusedN>, <start_of_turn>, <end_of_turn>
-            toktypes[i] = SentencePieceTokenTypes.CONTROL
-        self.gguf_writer.add_tokenizer_model("llama")
-        self.gguf_writer.add_tokenizer_pre("default")
-        self.gguf_writer.add_token_list(tokens)
-        self.gguf_writer.add_token_scores(scores)
-        self.gguf_writer.add_token_types(toktypes)
-
-        special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
-        special_vocab.add_to_gguf(self.gguf_writer)
+        self._set_vocab_sentencepiece()
 
         self.gguf_writer.add_add_space_prefix(False)
 
@@ -3318,7 +3336,7 @@ class ChatGLMModel(Model):
         for i in range(vocab_size):
             if i not in reverse_vocab:
                 tokens.append(f"[PAD{i}]")
-                toktypes.append(gguf.TokenType.USER_DEFINED)
+                toktypes.append(gguf.TokenType.UNUSED)
             elif reverse_vocab[i] in added_vocab:
                 tokens.append(reverse_vocab[i])
                 if tokenizer.added_tokens_decoder[i].special:
diff --git a/src/llama.cpp b/src/llama.cpp
index 11147eb11..c30d0adfe 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -5640,7 +5640,7 @@ static void llm_load_vocab(
     // build special tokens cache
     {
         for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
-            if (!(vocab.id_to_token[id].attr & LLAMA_TOKEN_ATTR_NORMAL)) {
+            if (vocab.id_to_token[id].attr & (LLAMA_TOKEN_ATTR_CONTROL | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
                 vocab.cache_special_tokens.push_back(id);
             }
         }
diff --git a/tests/test-tokenizer-random.py b/tests/test-tokenizer-random.py
index c50a8ca32..cdfc2b12c 100644
--- a/tests/test-tokenizer-random.py
+++ b/tests/test-tokenizer-random.py
@@ -20,7 +20,7 @@ from typing import Any, Iterator, cast
 from typing_extensions import Buffer
 
 import cffi
-from transformers import AutoTokenizer
+from transformers import AutoTokenizer, PreTrainedTokenizer
 
 logger = logging.getLogger("test-tokenizer-random")
 
@@ -129,7 +129,7 @@ class Tokenizer:
 class TokenizerGroundtruth (Tokenizer):
 
     def __init__(self, dir_tokenizer: str):
-        self.model = AutoTokenizer.from_pretrained(dir_tokenizer)
+        self.model: PreTrainedTokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
         # guess BOS and EOS
         ids = self.encode("a")
         assert 1 <= len(ids) <= 3
@@ -143,7 +143,7 @@ class TokenizerGroundtruth (Tokenizer):
         self.vocab = list(sorted(self.vocab))
 
         # tokens and lists
         self.special_tokens = list(self.model.all_special_tokens)
-        self.added_tokens = list(self.model.added_tokens_encoder)
+        self.added_tokens = self.model.batch_decode(self.model.added_tokens_encoder.values(), skip_special_tokens=False)
         self.bos_token = self.model.bos_token
         self.eos_token = self.model.eos_token
@@ -458,8 +458,8 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
             i = find_first_mismatch(ids1, ids2)
             ids1 = list(ids1)[max(0, i - 2) : i + 5 + 1]
             ids2 = list(ids2)[max(0, i - 2) : i + 5 + 1]
-            logger.error(" Expected: " + str(ids1))
-            logger.error(" Result: " + str(ids2))
+            logger.error(" Expected: " + str(ids1) + f" {[tokenizer1.decode([id]) for id in ids1]}")
+            logger.error(" Result: " + str(ids2) + f" {[tokenizer2.decode([id]) for id in ids2]}")
             encode_errors += 1
             logger.error(f" {encode_errors=}")
         if decode_errors < MAX_ERRORS and not check_detokenizer(text, text1, text2):
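
A note on the does_token_look_special() heuristic in the first hunk: it can be
exercised in isolation. Below is a minimal self-contained sketch; the helper
name and the sample tokens are illustrative, not part of the patch.

    def looks_special(token_text: str) -> bool:
        # Known control tokens that some models leave marked as "not special"
        # (deepseek-coder's <pad>; gemma's <mask>, <2mass>, [@BOS@]).
        if token_text in ("<pad>", "<mask>", "<2mass>", "[@BOS@]"):
            return True
        # ChatML-style markers such as <|im_start|>, plus deepseek-coder's
        # fullwidth-bar variants (U+FF5C in place of "|").
        if token_text.startswith("<|") and token_text.endswith("|>"):
            return True
        if token_text.startswith("<｜") and token_text.endswith("｜>"):
            return True
        # gemma{,-2} reserved tokens such as <unused0>.
        return token_text.startswith("<unused") and token_text.endswith(">")

    assert looks_special("<|im_start|>")
    assert looks_special("<unused42>")
    assert not looks_special("hello")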
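The b"\xe2\x96\x81" literal in the "pre-normalize user-defined spaces" lines is
the UTF-8 encoding of U+2581 ("▁"), the character SentencePiece-style vocabs
store in place of a space. Replacing it before the token is written lets
user-defined tokens match ordinary text at runtime. A quick illustration with a
hypothetical added token:

    marker = b"\xe2\x96\x81".decode("utf-8")  # "▁" (U+2581)
    token = "▁tool▁call"                      # as stored in the vocab
    assert token.replace(marker, " ") == " tool call"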
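The src/llama.cpp hunk tightens which tokens enter the special tokens cache:
the old test admitted every token lacking the NORMAL attribute (so the UNUSED
[PAD...] slots introduced above would have slipped in), while the new test
admits only CONTROL and USER_DEFINED tokens. A Python sketch of the two
predicates; the bit values are illustrative, not the actual llama.h enum:

    UNKNOWN, UNUSED, NORMAL, CONTROL, USER_DEFINED, BYTE = (1 << i for i in range(6))

    def old_test(attr: int) -> bool:
        return not (attr & NORMAL)  # any non-NORMAL token qualified

    def new_test(attr: int) -> bool:
        return bool(attr & (CONTROL | USER_DEFINED))

    assert old_test(UNUSED) and not new_test(UNUSED)  # padding slots now excluded
    assert old_test(CONTROL) and new_test(CONTROL)    # control tokens still cached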