convert : update convert-new.py with tokenizer fixes (#2614)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies.
* Adapt convert-new.py (and fix a clang-cl compiler error on Windows).
commit afc4ca2889
parent ec1b100720

 convert-new.py | 104
@@ -205,25 +205,58 @@ class Params:
         return params
 
 
-class SentencePieceVocab:
-    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
-        self.vocabtype = vocabtype
-        if self.vocabtype == "bpe":
-            self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
-        else:
-            self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+class BpeVocab:
+    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
+        self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read())
         added_tokens: Dict[str, int]
         if fname_added_tokens is not None:
-            added_tokens = json.load(open(fname_added_tokens))
+            added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
         else:
             added_tokens = {}
-        if self.vocabtype == "bpe":
-            vocab_size: int = len(self.sentencepiece_tokenizer)
-        else:
-            vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
+        vocab_size: int = len(self.bpe_tokenizer)
+        expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
+        actual_ids = sorted(added_tokens.values())
+        if expected_ids != actual_ids:
+            raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+        items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
+        self.added_tokens_list = [text for (text, idx) in items]
+        self.vocab_size_base: int = vocab_size
+        self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
+        self.fname_tokenizer = fname_tokenizer
+        self.fname_added_tokens = fname_added_tokens
+
+    def bpe_tokens(self) -> Iterable[Tuple[bytes, float]]:
+        tokenizer = self.bpe_tokenizer
+        from transformers.models.gpt2 import tokenization_gpt2
+        byte_encoder = tokenization_gpt2.bytes_to_unicode()
+        byte_decoder = {v: k for k, v in byte_encoder.items()}
+        for i, item in enumerate(tokenizer):
+            text: bytes = item.encode("utf-8")
+            score: float = -i
+            yield text, score
+
+    def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
+        for text in self.added_tokens_list:
+            score = -1000.0
+            yield text.encode("utf-8"), score
+
+    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
+        yield from self.bpe_tokens()
+        yield from self.added_tokens()
+
+    def __repr__(self) -> str:
+        return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+
+
+class SentencePieceVocab:
+    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
+        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+        added_tokens: Dict[str, int]
+        if fname_added_tokens is not None:
+            added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
+        else:
+            added_tokens = {}
+        vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
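Both new constructors run the same added-token consistency check. As a quick illustration of what that check enforces, here is a small standalone sketch; the validate_added_tokens helper and the sample IDs are illustrative, not part of convert-new.py:

    from typing import Dict, List

    def validate_added_tokens(base_vocab_size: int, added_tokens: Dict[str, int]) -> List[str]:
        # Added tokens must occupy the IDs directly after the base vocabulary,
        # with no gaps and no overlap, which is exactly what the constructors require.
        expected_ids = list(range(base_vocab_size, base_vocab_size + len(added_tokens)))
        actual_ids = sorted(added_tokens.values())
        if expected_ids != actual_ids:
            raise ValueError(f"expected IDs {expected_ids}, got {actual_ids}")
        # Return the token texts ordered by ID, mirroring self.added_tokens_list.
        return [text for text, _idx in sorted(added_tokens.items(), key=lambda kv: kv[1])]

    # A 32000-token base vocab plus two added tokens at IDs 32000 and 32001 passes;
    # a gap or an overlap with the base range would raise ValueError.
    print(validate_added_tokens(32000, {"<pad>": 32000, "<eot>": 32001}))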
@@ -238,32 +271,11 @@ class SentencePieceVocab:
 
     def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
         tokenizer = self.sentencepiece_tokenizer
-        if self.vocabtype == "bpe":
-            from transformers.models.gpt2 import tokenization_gpt2
-            byte_encoder = tokenization_gpt2.bytes_to_unicode()
-            byte_decoder = {v: k for k, v in byte_encoder.items()}
-            for i, item in enumerate(tokenizer):
-                text: bytes
-                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
-                score: float = -i
-                yield text, score
-        else:
-            for i in range(tokenizer.vocab_size()):
-                text: bytes
-                if tokenizer.is_unknown(i):
-                    text = " \u2047 ".encode("utf-8")
-                elif tokenizer.is_control(i):
-                    text = b""
-                elif tokenizer.is_byte(i):
-                    piece = tokenizer.id_to_piece(i)
-                    if len(piece) != 6:
-                        raise Exception(f"Invalid token: {piece}")
-                    byte_value = int(piece[3:-1], 16)
-                    text = struct.pack("B", byte_value)
-                else:
-                    text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
-                score: float = tokenizer.get_score(i)
-                yield text, score
+        for i in range(tokenizer.vocab_size()):
+            piece = tokenizer.id_to_piece(i)
+            text: bytes = piece.encode("utf-8")
+            score: float = tokenizer.get_score(i)
+            yield text, score
 
     def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
         for text in self.added_tokens_list:
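The removed branch handled SentencePiece's unknown, control, and byte-fallback pieces inside the converter; after this change the converter emits the raw piece text and leaves that interpretation to whatever consumes the vocabulary. For reference, a standalone sketch of what the deleted byte-piece case did with a piece such as "<0x0A>"; the helper name is illustrative:

    import struct

    def decode_byte_piece(piece: str) -> bytes:
        # SentencePiece byte-fallback pieces look like "<0xNN>"; the old code
        # checked the 6-character shape, parsed the hex digits, and packed
        # them into a single raw byte.
        if len(piece) != 6:
            raise ValueError(f"Invalid token: {piece}")
        byte_value = int(piece[3:-1], 16)
        return struct.pack("B", byte_value)

    print(decode_byte_piece("<0x0A>"))  # b'\n', i.e. a literal newline byte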
@@ -278,7 +290,7 @@ class SentencePieceVocab:
         return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
 
 
-Vocab = Union[SentencePieceVocab]
+Vocab = Union[BpeVocab, SentencePieceVocab]
 
 
 def permute(weights: NDArray, n_head: int) -> NDArray:
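With Vocab widened to Union[BpeVocab, SentencePieceVocab], downstream code can stay agnostic about which class it received, as long as both expose the same token iteration interface. A minimal sketch of that duck-typed usage; the stand-in classes below are illustrative, not the real ones:

    from typing import Iterable, Tuple, Union

    class FakeBpeVocab:
        def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
            yield b"hello", -1.0

    class FakeSentencePieceVocab:
        def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
            yield "\u2581world".encode("utf-8"), 0.5

    Vocab = Union[FakeBpeVocab, FakeSentencePieceVocab]

    def dump_tokens(vocab: Vocab) -> None:
        # Works for either class because both yield (bytes, score) pairs.
        for text, score in vocab.all_tokens():
            print(text, score)

    dump_tokens(FakeBpeVocab())
    dump_tokens(FakeSentencePieceVocab())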
@@ -679,7 +691,7 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc
 
 def check_vocab_size(params: Params, vocab: Vocab) -> None:
     if params.n_vocab != vocab.vocab_size:
-        assert isinstance(vocab, SentencePieceVocab)
+        assert isinstance(vocab, BpeVocab) or isinstance(vocab, SentencePieceVocab)
         if params.n_vocab == vocab.vocab_size_base:
             print("Ignoring added_tokens.json since model matches vocab size without it.")
             vocab.added_tokens_list = []
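The widened assert keeps the vocab-size reconciliation working for both classes. A simplified sketch of the decision it guards; the TinyVocab stand-in, the vocab_size reset, and the final mismatch error are plumbing for the sketch rather than code taken from the hunk:

    from typing import List

    class TinyVocab:
        # Minimal stand-in with the attributes check_vocab_size relies on.
        def __init__(self, base: int, added: List[str]) -> None:
            self.vocab_size_base = base
            self.added_tokens_list = added
            self.vocab_size = base + len(added)

    def check_vocab_size(n_vocab: int, vocab: TinyVocab) -> None:
        if n_vocab != vocab.vocab_size:
            if n_vocab == vocab.vocab_size_base:
                # The model tensors match the base vocab, so added_tokens.json is dropped.
                print("Ignoring added_tokens.json since model matches vocab size without it.")
                vocab.added_tokens_list = []
                vocab.vocab_size = vocab.vocab_size_base
                return
            raise Exception(f"Vocab size mismatch: model has {n_vocab}, vocab has {vocab.vocab_size}")

    check_vocab_size(32000, TinyVocab(32000, ["<pad>"]))  # prints the notice and drops the extra token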
@@ -853,7 +865,7 @@ def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
     return {name: model[name] for name in TENSORS_LIST if name in model}
 
 
-def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
+def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]:
     print(f"vocabtype: {vocabtype}")
     # Be extra-friendly and accept either a file or a directory.  Also, if it's
     # a directory, it might be the model directory, and tokenizer.model might
@@ -875,8 +887,12 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
             "if it's in another directory, pass the directory as --vocab-dir")
     added_tokens_path = path.parent / "added_tokens.json"
     print(f"Loading vocab file {path}")
-    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
-                              vocabtype)
+    if vocabtype == "bpe":
+        return BpeVocab(path, added_tokens_path if added_tokens_path.exists() else None)
+    elif vocabtype == "spm":
+        return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
+    else:
+        raise ValueError(f"Unsupported vocabulary type {vocabtype}")
 
 
 def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
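The new tail of load_vocab dispatches on the --vocabtype flag instead of threading it into a single class, and unsupported values now fail loudly rather than falling back to the SentencePiece path. A minimal sketch of the same dispatch with stand-in classes; the class bodies and the example path are illustrative:

    from pathlib import Path
    from typing import Optional, Union

    # Stand-ins for the converter's classes; the real ones parse tokenizer files.
    class BpeVocab:
        def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
            self.kind = "bpe"

    class SentencePieceVocab:
        def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
            self.kind = "spm"

    def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]:
        added_tokens_path = path.parent / "added_tokens.json"
        fname_added = added_tokens_path if added_tokens_path.exists() else None
        # Same dispatch as the hunk above: "bpe" and "spm" each pick a class,
        # anything else raises instead of silently defaulting to SentencePiece.
        if vocabtype == "bpe":
            return BpeVocab(path, fname_added)
        elif vocabtype == "spm":
            return SentencePieceVocab(path, fname_added)
        else:
            raise ValueError(f"Unsupported vocabulary type {vocabtype}")

    print(load_vocab(Path("tokenizer.model"), "spm").kind)  # spm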
@@ -307,7 +307,7 @@ int main(int argc, char ** argv) {
         auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
             return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
         };
-        SetConsoleCtrlHandler(static_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
+        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
 #endif
 
         fprintf(stderr, "%s: interactive mode on.\n", __func__);
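This last hunk is C++ rather than Python and is presumably the clang-cl fix called out in the commit message. The captureless lambda converts to a plain function pointer, and where that pointer type and PHANDLER_ROUTINE differ (for example in calling convention on 32-bit Windows), static_cast is not a valid conversion between distinct function-pointer types, so clang-cl rejects it; reinterpret_cast expresses the conversion explicitly and builds on both compilers.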