Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-13 14:29:52 +00:00)
Fix: `sentencepiece` tokenizers with added tokens failed with an incorrect assertion
commit 3a716b4dae
parent 1faaae8c2b
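
For context, a minimal sketch of the call that used to trip the assertion, assuming `model` has been loaded from a sentencepiece-based GGUF whose tokenizer carries added tokens and `added_tok` is the id of one such token (both names are placeholders, not part of the commit):

    char piece[64];
    // Before this commit: an added token matches none of the normal / unknown /
    // control / byte branches, so llama_token_to_piece() hit GGML_ASSERT(false)
    // and aborted the process.
    // After this commit: the token is suppressed like a CONTROL token and the
    // call returns 0, i.e. an empty piece.
    int n = llama_token_to_piece(model, added_tok, piece, (int) sizeof(piece));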
@@ -8200,7 +8200,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
                 buf[0] = llama_token_to_byte(model->vocab, token);
                 return 1;
             } else {
-                GGML_ASSERT(false);
+                // TODO: for now we accept all unsupported token types,
+                // suppressing them like CONTROL tokens.
+                // GGML_ASSERT(false);
             }
             break;
         }
@@ -8216,7 +8218,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
             } else if (llama_is_control_token(model->vocab, token)) {
                 ;
             } else {
-                GGML_ASSERT(false);
+                // TODO: for now we accept all unsupported token types,
+                // suppressing them like CONTROL tokens.
+                // GGML_ASSERT(false);
             }
             break;
         }