From 899f9a5350937db8b80900a1604edec5997e4b01 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Thu, 17 Aug 2023 19:49:21 +0300
Subject: [PATCH] llama : fix lambda capture

ggml-ci
---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llama.cpp b/llama.cpp
index 753d130d6..c8403e9c5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3576,7 +3576,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_data = work.data();
             std::vector<int64_t> hist_cur(1 << 4, 0);
 
-            const int chunk_size = 32 * 512;
+            static const int chunk_size = 32 * 512;
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
             if (nthread_use < 2) {
@@ -3584,7 +3584,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             } else {
                 size_t counter = 0;
                 new_size = 0;
-                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size]() { // NOLINT
+                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
                     std::vector<int64_t> local_hist;
                     size_t local_size = 0;
                     while (true) {
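
Why the capture can simply be dropped: a block-scope variable declared static has static storage duration, and a C++ lambda may refer to such a variable directly without listing it in the capture clause. The sketch below is not part of the patch; it only mirrors the shape of the hunk above (the mutex/counter/compute names are illustrative, not taken from llama.cpp) to show that making chunk_size static removes the need to capture it, and with it the // NOLINT on the old lambda line, which presumably silenced a lint about that capture.

    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    int main() {
        // A block-scope constant with static storage duration: lambdas can read it
        // without capturing it, so it never has to appear in a capture list.
        static const int chunk_size = 32 * 512;

        std::mutex mutex;
        size_t counter = 0; // ordinary locals still have to be captured (by reference here)

        // chunk_size is intentionally absent from the capture list, as in the patch.
        auto compute = [&mutex, &counter]() {
            std::lock_guard<std::mutex> lock(mutex);
            counter += chunk_size; // refers to the static constant directly
        };

        std::vector<std::thread> workers;
        for (int i = 0; i < 4; ++i) {
            workers.emplace_back(compute);
        }
        for (auto & w : workers) {
            w.join();
        }

        printf("counter = %zu\n", counter);
        return 0;
    }

Note that a plain const int initialized from a constant expression can also be read inside a lambda without being captured (the read is not an odr-use), so the explicit chunk_size capture was redundant to begin with; making the constant static simply makes that explicit.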