vocab : minor style
Some checks are pending
Python check requirements.txt / check-requirements (push) Waiting to run
flake8 Lint / Lint (push) Waiting to run
Python Type-Check / pyright type-check (push) Waiting to run

ggml-ci
This commit is contained in:
Georgi Gerganov 2024-09-25 20:39:37 +03:00
parent 866c0113fb
commit 84f56f3c45
No known key found for this signature in database
GPG Key ID: 449E073F9DC10735

View File

@@ -1477,7 +1477,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab,
{ {
llm_tokenizer_ugm tokenizer(vocab); llm_tokenizer_ugm tokenizer(vocab);
if (add_special && vocab.tokenizer_add_bos != 0) { if (add_special && vocab.tokenizer_add_bos) {
GGML_ASSERT(vocab.special_bos_id != -1); GGML_ASSERT(vocab.special_bos_id != -1);
output.push_back(vocab.special_bos_id); output.push_back(vocab.special_bos_id);
} }
@@ -1494,14 +1494,14 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab & vocab,
} }
} }
if (add_special && vocab.tokenizer_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) { if (add_special && vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
LLAMA_LOG_WARN( LLAMA_LOG_WARN(
"%s: Added a BOS token to the prompt as specified by the model but the prompt " "%s: Added a BOS token to the prompt as specified by the model but the prompt "
"also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. " "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. "
"Are you sure this is what you want?\n", __FUNCTION__); "Are you sure this is what you want?\n", __FUNCTION__);
} }
if (add_special && vocab.tokenizer_add_eos == 1) { if (add_special && vocab.tokenizer_add_eos) {
GGML_ASSERT(vocab.special_eos_id != -1); GGML_ASSERT(vocab.special_eos_id != -1);
output.push_back(vocab.special_eos_id); output.push_back(vocab.special_eos_id);
} }
@@ -1713,11 +1713,13 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
// suppressing them like CONTROL tokens. // suppressing them like CONTROL tokens.
if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) { if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
return _try_copy(token_text.data(), token_text.size()); return _try_copy(token_text.data(), token_text.size());
} else if (attr & LLAMA_TOKEN_ATTR_NORMAL) { }
if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
std::string result = token_text; std::string result = token_text;
llama_unescape_whitespace(result); llama_unescape_whitespace(result);
return _try_copy(result.data(), result.size()); return _try_copy(result.data(), result.size());
} else if (attr & LLAMA_TOKEN_ATTR_BYTE) { }
if (attr & LLAMA_TOKEN_ATTR_BYTE) {
char byte = (char) llama_token_to_byte(vocab, token); char byte = (char) llama_token_to_byte(vocab, token);
return _try_copy((char*) &byte, 1); return _try_copy((char*) &byte, 1);
} }
@@ -1728,7 +1730,8 @@ int32_t llama_token_to_piece_impl(const struct llama_vocab & vocab, llama_token
// suppressing them like CONTROL tokens. // suppressing them like CONTROL tokens.
if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) { if (attr & (attr_special | LLAMA_TOKEN_ATTR_USER_DEFINED)) {
return _try_copy(token_text.data(), token_text.size()); return _try_copy(token_text.data(), token_text.size());
} else if (attr & LLAMA_TOKEN_ATTR_NORMAL) { }
if (attr & LLAMA_TOKEN_ATTR_NORMAL) {
std::string result = llama_decode_text(token_text); std::string result = llama_decode_text(token_text);
return _try_copy(result.data(), result.size()); return _try_copy(result.data(), result.size());
} }