mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25 10:54:36 +00:00)
server : fix segfault on long system prompt (#8987)
* server : fix segfault on long system prompt
* server : fix parallel generation with very small batch sizes
* server : fix typo in comment
parent 43bdd3ce18
commit 98a532d474
@@ -754,13 +754,13 @@ struct server_context {
         default_generation_settings_for_props = get_formated_generation(slots.front());
         default_generation_settings_for_props["seed"] = -1;
 
-        // the update_slots() logic will always submit a maximum of n_batch tokens
+        // the update_slots() logic will always submit a maximum of n_batch or n_parallel tokens
         // note that n_batch can be > n_ctx (e.g. for non-causal attention models such as BERT where the KV cache is not used)
         {
             const int32_t n_batch = llama_n_batch(ctx);
 
             // only a single seq_id per token is needed
-            batch = llama_batch_init(n_batch, 0, 1);
+            batch = llama_batch_init(std::max(n_batch, params.n_parallel), 0, 1);
         }
 
         metrics.init();
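This hunk covers the parallel-generation fix: as the updated comment notes, update_slots() can submit up to one token per slot in a single pass, so when the server runs with a logical batch size smaller than the number of slots, a batch allocated for n_batch tokens alone is too small. Sizing the allocation with std::max keeps room for whichever of the two is larger. Below is a minimal, self-contained sketch of that capacity reasoning, using made-up numbers rather than real server parameters:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    // assumed example values, not taken from the commit: a tiny -b with several slots
    const int32_t n_batch    = 2;  // logical batch size
    const int32_t n_parallel = 4;  // number of server slots

    // worst case during generation: one token queued per slot in a single pass
    const int32_t tokens_per_pass = n_parallel;

    const int32_t capacity_before = n_batch;                       // old allocation size
    const int32_t capacity_after  = std::max(n_batch, n_parallel); // new allocation size

    printf("tokens queued per pass: %d\n", tokens_per_pass);
    printf("capacity before fix: %d (too small), after fix: %d\n",
           capacity_before, capacity_after);
    return 0;
}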
@@ -1137,28 +1137,19 @@ struct server_context {
         if (!system_prompt.empty()) {
             system_tokens = ::llama_tokenize(ctx, system_prompt, true);
 
-            llama_batch_clear(batch);
-
-            for (int i = 0; i < (int)system_tokens.size(); ++i) {
-                llama_batch_add(batch, system_tokens[i], i, { 0 }, false);
-            }
-
             const int32_t n_batch = llama_n_batch(ctx);
+            const int32_t n_tokens_prompt = system_tokens.size();
 
-            for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
-                const int32_t n_tokens = std::min(params.n_batch, batch.n_tokens - i);
-                llama_batch batch_view = {
-                    n_tokens,
-                    batch.token    + i,
-                    nullptr,
-                    batch.pos      + i,
-                    batch.n_seq_id + i,
-                    batch.seq_id   + i,
-                    batch.logits   + i,
-                    0, 0, 0, // unused
-                };
+            for (int32_t i = 0; i < n_tokens_prompt; i += n_batch) {
+                const int32_t n_tokens = std::min(n_batch, n_tokens_prompt - i);
 
-                if (llama_decode(ctx, batch_view) != 0) {
+                llama_batch_clear(batch);
+
+                for (int32_t j = 0; j < n_tokens; ++j) {
+                    llama_batch_add(batch, system_tokens[i + j], i + j, { 0 }, false);
+                }
+
+                if (llama_decode(ctx, batch) != 0) {
                     LOG_ERROR("llama_decode() failed", {});
                     return;
                 }
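This hunk is the segfault fix itself. In the old code every system-prompt token was pushed into batch before any decoding, but batch only has room for n_batch tokens (see the allocation in the first hunk), so a system prompt longer than n_batch wrote past the end of the batch arrays. The new loop clears and refills the batch per chunk, never adding more than n_batch tokens at a time. A minimal, self-contained sketch of that chunking pattern, using stand-in variables instead of the server's llama_batch helpers:

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
    const int32_t n_batch = 8;                  // assumed batch capacity
    std::vector<int32_t> system_tokens(21, 0);  // stand-in for a long tokenized system prompt

    const int32_t n_tokens_prompt = (int32_t) system_tokens.size();

    // submit the prompt in slices of at most n_batch tokens,
    // mirroring the clear/add/decode loop in the hunk above
    for (int32_t i = 0; i < n_tokens_prompt; i += n_batch) {
        const int32_t n_tokens = std::min(n_batch, n_tokens_prompt - i);

        // in the server this is llama_batch_clear / llama_batch_add / llama_decode;
        // here we only print the slice boundaries
        printf("chunk: tokens [%d, %d) at positions [%d, %d)\n",
               i, i + n_tokens, i, i + n_tokens);
    }
    return 0;
}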