From c062ffd18cbbf7a7e905223bfee87fefe9746db3 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 11 Oct 2023 19:24:59 +0300
Subject: [PATCH] batched-bench : init warm-up batch

---
 examples/batched-bench/batched-bench.cpp | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/examples/batched-bench/batched-bench.cpp b/examples/batched-bench/batched-bench.cpp
index 58b738cac..b2ffdd987 100644
--- a/examples/batched-bench/batched-bench.cpp
+++ b/examples/batched-bench/batched-bench.cpp
@@ -102,6 +102,14 @@ int main(int argc, char ** argv) {
     // warm up
     {
         batch.n_tokens = 16;
+
+        for (int i = 0; i < batch.n_tokens; ++i) {
+            batch.token[i]  = 0;
+            batch.pos[i]    = i;
+            batch.seq_id[i] = 0;
+            batch.logits[i] = false;
+        }
+
         if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
             LOG_TEE("%s: llama_decode() failed\n", __func__);
             return 1;