mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-27 03:44:35 +00:00
server : batch has to be allocated for n_parallel sequences
This commit is contained in:
parent
6b2437e32d
commit
113dd60005
@@ -631,7 +631,9 @@ struct llama_server_context
         LOG_TEE(" -> Slot %i - max context: %i\n", slot.id, max_ctx_per_slot);
         slots.push_back(slot);
     }
-    batch = llama_batch_init(n_ctx, 0, 1);
+    batch = llama_batch_init(n_ctx, 0, params.n_parallel);

     // empty system prompt
     system_prompt = "";
     num_tokens_system = 0;
Loading…
Reference in New Issue
Block a user