From 5a5aeb1e91009c72bf816400b758bb8a305616d7 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sat, 13 May 2023 16:55:14 +0300
Subject: [PATCH] llama : fix unused warning

---
 llama.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index 73b932a74..98f49abd7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1053,6 +1053,8 @@ static void llama_model_load_internal(
         fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
     }
+#else
+    (void) n_gpu_layers;
 #endif

     // loading time will be recalculate after the first eval, so
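
Note: the `(void)` cast is the standard C/C++ idiom for marking a parameter as
intentionally unused. Here, `n_gpu_layers` is only referenced inside the cuBLAS
branch, so non-cuBLAS builds would emit an unused-parameter warning without it.
Below is a minimal standalone sketch of the same idiom; the names (`USE_GPU`,
`load_model`, `gpu_layers`) are hypothetical and not part of llama.cpp:

    #include <cstdio>

    static void load_model(int gpu_layers) {
    #ifdef USE_GPU
        // GPU build: the parameter is actually used.
        std::printf("offloading %d layers to GPU\n", gpu_layers);
    #else
        // CPU-only build: evaluate the name to silence -Wunused-parameter.
        // The cast to void generates no code.
        (void) gpu_layers;
        std::printf("CPU-only build\n");
    #endif
    }

    int main() {
        load_model(32);
        return 0;
    }

Compiling with `g++ -Wall -Wextra sketch.cpp` (without `-DUSE_GPU`) produces no
warning thanks to the cast; removing the `(void) gpu_layers;` line brings the
warning back.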