simple-chat : only add bos on first prompt
commit 2a229879c2
parent a6744e43e8
@@ -96,7 +96,7 @@ int main(int argc, char ** argv) {
         // tokenize the prompt
         const int n_prompt_tokens = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, true, true);
         std::vector<llama_token> prompt_tokens(n_prompt_tokens);
-        if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), true, true) < 0) {
+        if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), llama_get_kv_cache_used_cells(ctx) == 0, true) < 0) {
             GGML_ABORT("failed to tokenize the prompt\n");
         }
 
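The idea behind the change: the BOS token should be prepended only to the very first prompt of a chat session. Once any tokens have been decoded, the KV cache has used cells, so llama_get_kv_cache_used_cells(ctx) == 0 acts as an "is this the first prompt?" test. Below is a minimal self-contained sketch of the resulting tokenization step, assuming the llama.cpp C API as of this commit; the helper name tokenize_prompt is ours for illustration, not from the source.

#include "llama.h"

#include <string>
#include <vector>

// Hypothetical helper (not from the commit): tokenize a chat prompt,
// adding BOS only while the context is still empty.
static std::vector<llama_token> tokenize_prompt(llama_model * model, llama_context * ctx, const std::string & prompt) {
    // On the first prompt nothing has been decoded yet, so the KV cache
    // has zero used cells; only then should the BOS token be added.
    const bool is_first = llama_get_kv_cache_used_cells(ctx) == 0;

    // first call with a NULL buffer returns the negated required token count
    const int n_prompt_tokens = -llama_tokenize(model, prompt.c_str(), prompt.size(), NULL, 0, is_first, true);
    std::vector<llama_token> prompt_tokens(n_prompt_tokens);
    if (llama_tokenize(model, prompt.c_str(), prompt.size(), prompt_tokens.data(), prompt_tokens.size(), is_first, true) < 0) {
        GGML_ABORT("failed to tokenize the prompt\n");
    }
    return prompt_tokens;
}

Without this check, every user turn in a multi-turn chat would be tokenized with a leading BOS token, inserting spurious BOS tokens mid-conversation.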