From fb62f924336c9746da9976c6ab3c2e6460258d54 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Fri, 12 May 2023 21:44:20 +0300
Subject: [PATCH] llama : fix --mtest option (close #1414)

---
 examples/main/main.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index bd1c4ab55..8543414dd 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -121,7 +121,7 @@ int main(int argc, char ** argv) {
     // uncomment the "used_mem" line in llama.cpp to see the results
     if (params.mem_test) {
         {
-            const std::vector<llama_token> tmp(params.n_batch, 0);
+            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
 
             llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
         }