If the first token generated by the server is the stop word, the server will crash (#7038)

The following request reproduces the issue with llama13b:
{
    'prompt': 'Q: hello world \nA: ',
    'stop': ['\n'],
    'temperature': 0.0,
    'n_predict': 10,
    'cache_prompt': True,
    'n_probs': 10
}
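To make the failure mode concrete, here is a minimal, self-contained sketch (not the server code itself) of the iterator arithmetic involved. The variable names only mirror the shape of slot.generated_token_probs and stop_word_toks in server.cpp, and the token ids and probability value are illustrative placeholders, not real tokenizer output.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    // First generated token is already the stop word: only one probability entry exists.
    std::vector<float> generated_token_probs = { 0.9f };

    // The stop word may tokenize into more than one token; assume two here
    // (placeholder ids, for illustration only).
    std::vector<int> stop_word_toks = { 29871, 13 };

    // Pre-fix arithmetic: end() - 2 on a vector of size 1 moves the iterator
    // before begin(), which is undefined behavior and crashed the server.
    // auto bad_end = generated_token_probs.end() - stop_word_toks.size();

    // Post-fix arithmetic: clamp the offset so it never exceeds the number of
    // generated probability entries.
    size_t safe_offset = std::min(generated_token_probs.size(), stop_word_toks.size());
    std::vector<float> probs(generated_token_probs.begin(),
                             generated_token_probs.end() - safe_offset);

    std::printf("kept %zu probability entries\n", probs.size());
    return 0;
}

With the clamped offset the trailing stop-word entries are dropped without ever stepping past the front of the vector, which is the behavior the patch below introduces.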
maor-ps 2024-05-04 12:06:40 +03:00 committed by GitHub
parent 92139b90af
commit 03fb8a002d


@@ -1383,9 +1383,10 @@ struct server_context {
         if (!slot.params.stream && slot.stopped_word) {
             const std::vector<llama_token> stop_word_toks = llama_tokenize(ctx, slot.stopping_word, false);
+            size_t safe_offset = std::min(slot.generated_token_probs.size(), stop_word_toks.size());
             probs = std::vector<completion_token_output>(
                 slot.generated_token_probs.begin(),
-                slot.generated_token_probs.end() - stop_word_toks.size());
+                slot.generated_token_probs.end() - safe_offset);
         } else {
             probs = std::vector<completion_token_output>(
                 slot.generated_token_probs.begin(),