From 8f429fa5111901f9646cf998643ac5310846d487 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 3 Sep 2023 13:42:56 +0300
Subject: [PATCH] perplexity : fix ETA by warming up the model with an empty
 run

---
 common/common.cpp      | 8 ++++++++
 examples/main/main.cpp | 8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 41fc59ced..a1c3dc780 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -752,6 +752,14 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
         params.logit_bias[llama_token_eos(lctx)] = -INFINITY;
     }
 
+    {
+        LOG("warming up the model with an empty run\n");
+
+        const std::vector<llama_token> tmp = { llama_token_bos(lctx), };
+        llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
+        llama_reset_timings(lctx);
+    }
+
     return std::make_tuple(model, lctx);
 }
 
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 7117db4b0..db98312ca 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -492,14 +492,6 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
-    {
-        LOG("warming up the model with an empty run\n");
-
-        const std::vector<llama_token> tmp = { llama_token_bos(ctx), };
-        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
-        llama_reset_timings(ctx);
-    }
-
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (embd.size() > 0) {
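
With this change the warm-up runs inside llama_init_from_gpt_params(), so every
tool that initializes through the common helper (perplexity included) pays the
one-time model setup cost before its timings start, instead of folding it into
the first measured batch and inflating the ETA. Below is a minimal sketch of
the same pattern in a standalone caller, assuming the llama_eval-era llama.cpp
API this patch targets; the scaffolding around the two warm-up calls is
illustrative, not part of the patch:

// warmup_sketch.cpp -- illustrative sketch, not part of this patch.
// Assumes the llama.cpp API at the time of this commit: llama_eval(),
// llama_token_bos(ctx) and llama_reset_timings() all still exist.
#include "common.h"
#include "llama.h"

#include <tuple>
#include <vector>

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    llama_backend_init(params.numa);

    llama_model   * model;
    llama_context * ctx;
    // After this patch the warm-up below already happens inside
    // llama_init_from_gpt_params(), so callers get it for free.
    std::tie(model, ctx) = llama_init_from_gpt_params(params);

    // Warm-up: evaluate a single BOS token so one-time costs
    // (weight paging, scratch buffer allocation) are paid up front.
    const std::vector<llama_token> tmp = { llama_token_bos(ctx), };
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);

    // Reset the timers so the warm-up does not skew the
    // tokens-per-second numbers the ETA estimate is derived from.
    llama_reset_timings(ctx);

    // ... real evaluation loop (e.g. the perplexity computation) ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}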