mirror of https://github.com/ggerganov/llama.cpp.git
perplexity : remove extra new lines after chunks
commit 3b6470f228
parent c35e586ea5
@@ -444,7 +444,6 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
             }
             LOG("%.2f minutes\n", total_seconds / 60.0);
         }
-        LOG("\n");
 
         //LOG_DBG("%s: using tokens %d...%d\n",__func__,params.n_ctx - params.ppl_stride + start, params.n_ctx + start);
         for (int j = n_ctx - params.ppl_stride - 1; j < n_ctx - 1; ++j) {
@@ -638,7 +637,6 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
             }
             LOG("%.2f minutes\n", total_seconds / 60.0);
         }
-        LOG("\n");
 
         for (int seq = 0; seq < n_seq_batch; seq++) {
             const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits_ith(ctx, seq*n_ctx + first);
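Both hunks make the same change: the LOG("\n") that followed the one-time timing estimate is removed, so the running list of per-chunk scores is no longer interrupted by a blank line. Below is a minimal, self-contained sketch of the resulting output pattern; it is an illustration, not llama.cpp's actual code, with printf standing in for the LOG macro and placeholder values for the timing and the scores.

// Sketch of the post-commit behavior: the ETA is printed once (after the
// first chunk) and each chunk's score is appended to a single running line;
// the only trailing newline comes after the loop.
#include <cstdio>

int main() {
    const int n_chunk = 4;
    for (int i = 0; i < n_chunk; ++i) {
        if (i == 0) {
            const int total_seconds = 123; // placeholder time estimate
            printf("%.2f minutes\n", total_seconds / 60.0);
            // pre-commit code also emitted LOG("\n") here, adding a
            // blank line between the estimate and the chunk scores
        }
        printf("%.4lf ", 5.0 + 0.01 * i); // placeholder per-chunk score
        fflush(stdout);                   // show progress immediately
    }
    printf("\n"); // terminate the score line once, after all chunks
    return 0;
}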