llama : avoid fprintf in favor of LLAMA_LOG (#3538)
commit e1675d133c
parent 8402566a7c
@@ -2327,13 +2327,13 @@ static void llm_load_vocab(
         }

         if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
-            fprintf(stderr, "%s: warning: Mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
+            LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
                 __func__,
                 special_tokens_count_from_verification, vocab.id_to_token.size(),
                 special_tokens_count_by_type, vocab.id_to_token.size()
             );
         } else {
-            fprintf(stderr, "%s: Special tokens definition check successful ( %u/%zu ).\n",
+            LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
                 __func__,
                 special_tokens_count_from_verification, vocab.id_to_token.size()
             );
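For reference, a minimal self-contained sketch of the pattern this commit switches to: printf-style LLAMA_LOG_* macros in place of raw fprintf(stderr, ...). The macro bodies and the vocab variables below are simplified stand-ins chosen for illustration (in llama.cpp the macros forward to an internal logger with a configurable callback rather than writing to stderr directly); only the call pattern mirrors the hunk above.

// Simplified stand-ins for the LLAMA_LOG_* macros, for illustration only.
// The real macros in llama.cpp route through an internal logger with a
// pluggable callback instead of calling fprintf directly.
#include <cstdio>
#include <string>
#include <vector>

#define LLAMA_LOG_INFO(...) std::fprintf(stderr, __VA_ARGS__)
#define LLAMA_LOG_WARN(...) std::fprintf(stderr, __VA_ARGS__)

int main() {
    // Hypothetical stand-ins for the vocab bookkeeping used in llm_load_vocab.
    std::vector<std::string> id_to_token = {"<s>", "</s>", "<unk>"};
    unsigned int special_tokens_count_from_verification = 3;
    unsigned int special_tokens_count_by_type           = 2;
    bool special_tokens_definition_mismatch             = false;

    // Same call pattern as the patched code: the format string and arguments
    // are unchanged; only the logging entry point differs from fprintf.
    if (special_tokens_definition_mismatch ||
        special_tokens_count_from_verification != special_tokens_count_by_type) {
        LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
            __func__,
            special_tokens_count_from_verification, id_to_token.size(),
            special_tokens_count_by_type,           id_to_token.size());
    } else {
        LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
            __func__,
            special_tokens_count_from_verification, id_to_token.size());
    }
    return 0;
}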