perplexity : fix kv cache handling for hellaswag (#4981)

ggml-ci
Author: Georgi Gerganov, 2024-01-16 19:34:54 +02:00, committed by GitHub
parent c37b3474e6
commit 959ef0c0df


@@ -428,6 +428,7 @@ static std::vector<float> hellaswag_evaluate_tokens(
     for (size_t i_chunk = 0; i_chunk < n_chunk; ++i_chunk) {
         size_t n_tokens = tokens.size() - i_chunk * n_batch;
         n_tokens = std::min(n_tokens, size_t(n_batch));
+        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
         if (llama_decode(ctx, llama_batch_get_one(tokens.data() + i_chunk * n_batch, n_tokens, n_past, 0))) {
             fprintf(stderr, "%s : failed to eval\n", __func__);
             return {};
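
A hedged note on the added call: hellaswag scoring re-decodes several candidate endings starting from the same n_past offset, so KV cache cells written while evaluating the previous ending remain at positions >= n_past and would be attended to as stale context. Removing those cells for sequence 0 before each llama_decode avoids that. The sketch below illustrates the pattern, assuming the llama.cpp C API as of this commit (January 2024); the helper name decode_chunks and its surrounding setup are illustrative, not code from the repository.

// Minimal sketch of the fixed decode loop, assuming the llama.cpp C API as of
// January 2024 (llama_kv_cache_seq_rm; llama_batch_get_one taking pos/seq args).
// decode_chunks is an illustrative helper, not a function from the repository.
#include "llama.h"

#include <algorithm>
#include <cstdio>
#include <vector>

static bool decode_chunks(llama_context * ctx, std::vector<llama_token> & tokens, int n_past, int n_batch) {
    const size_t n_chunk = (tokens.size() + n_batch - 1) / n_batch;

    for (size_t i_chunk = 0; i_chunk < n_chunk; ++i_chunk) {
        size_t n_tokens = tokens.size() - i_chunk * n_batch;
        n_tokens = std::min(n_tokens, size_t(n_batch));

        // Clear KV cache cells of sequence 0 from position n_past to the end (-1),
        // so tokens cached while scoring a previous ending are not reused as context.
        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);

        if (llama_decode(ctx, llama_batch_get_one(tokens.data() + i_chunk * n_batch, n_tokens, n_past, 0))) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return false;
        }

        n_past += n_tokens;  // advance past the tokens just decoded
    }
    return true;
}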