imatrix : fix segfault when using a single chunk per batch
commit 347247a24e
parent bce54642c8
@@ -564,7 +564,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params, cons
         if (params.compute_ppl) {
             const int first = n_ctx/2;
             for (int seq = 0; seq < n_seq_batch; seq++) {
-                const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx + first);
+                const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx);
 
                 llama_token * tokens_data = tokens.data() + start + seq*n_ctx + first;
 
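Note on the fix, as far as this hunk shows (my reading, not from the commit message): both arms of the ternary must yield a base pointer to the start of a sequence's logit rows, and the multi-batch arm, logits.data(), carries no `first` offset. The consumer of all_logits (outside this hunk) evidently applies the `first` offset itself, just as tokens_data above already includes `+ first`; the old single-batch arm baked `first` into the base as well, so the offset was applied twice and the reads ran past the end of the logits buffer, which segfaults when a batch holds a single chunk. A minimal standalone sketch of that indexing, with made-up sizes; this models the bug, it is not llama.cpp code:

// Hypothetical model of the out-of-bounds read fixed above.
// Assumption: the consumer adds the `first` offset itself, so the
// base pointer must start at the sequence boundary.
#include <cstdio>
#include <vector>

int main() {
    const int n_vocab     = 4;        // floats per logit row
    const int n_ctx       = 8;        // tokens per chunk
    const int n_seq_batch = 1;        // a single chunk per batch
    const int first       = n_ctx/2;  // ppl uses the second half of the chunk

    // One row of logits per token in the batch.
    std::vector<float> logits(n_seq_batch * n_ctx * n_vocab, 0.0f);

    for (int seq = 0; seq < n_seq_batch; seq++) {
        // Before the fix the base already included `first`:
        //   const float * base = logits.data() + (seq*n_ctx + first)*n_vocab;
        // After the fix it starts at the sequence boundary:
        const float * base = logits.data() + seq*n_ctx*n_vocab;

        // The consumer applies the `first` offset on top of the base; with
        // the pre-fix base the offset was applied twice, pushing every read
        // `first` rows past where it belongs and off the end of the buffer.
        const float * row0   = base + first*n_vocab;
        const int     n_rows = n_ctx - 1 - first;  // rows actually evaluated
        for (int i = 0; i < n_rows; i++) {
            const float * row = row0 + i*n_vocab;  // in bounds with the fixed base
            (void) row;
        }
    }
    printf("ok: all logit reads stayed in bounds\n");
    return 0;
}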