llama : fix lambda capture

ggml-ci
Georgi Gerganov 2023-08-17 19:49:21 +03:00
parent 93f285bdf1
commit 899f9a5350

@@ -3576,7 +3576,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_data = work.data();
             std::vector<int64_t> hist_cur(1 << 4, 0);
 
-            const int chunk_size = 32 * 512;
+            static const int chunk_size = 32 * 512;
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
             if (nthread_use < 2) {
@@ -3584,7 +3584,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             } else {
                 size_t counter = 0;
                 new_size = 0;
-                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size]() { // NOLINT
+                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
                     std::vector<int64_t> local_hist;
                     size_t local_size = 0;
                     while (true) {
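
Why this works: entities with static storage duration are never captured by lambdas, so once chunk_size is static const the lambda can use it, even odr-use it (for example by binding it to a const int & parameter), without naming it in the explicit capture list, and the // NOLINT suppression on the capture is no longer needed. Below is a minimal standalone sketch of the pattern, with hypothetical values and a hypothetical worker body standing in for the real quantization loop:

#include <algorithm>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

int main() {
    const int nelements = 1000000;           // hypothetical workload size
    static const int chunk_size = 32 * 512;  // static storage: no capture needed

    std::mutex mutex;
    size_t counter  = 0;  // next chunk offset, guarded by mutex
    size_t new_size = 0;  // total elements processed, guarded by mutex

    // explicit capture list, chunk_size deliberately absent
    auto compute = [&mutex, &counter, &new_size, nelements]() {
        size_t local_size = 0;
        while (true) {
            int first;
            {
                std::lock_guard<std::mutex> lock(mutex);
                first = (int) counter;
                counter += chunk_size;  // plain read of the constant
            }
            if (first >= nelements) {
                std::lock_guard<std::mutex> lock(mutex);
                new_size += local_size;
                return;
            }
            // std::min takes const int &, so this odr-uses chunk_size;
            // legal here only because chunk_size has static storage duration
            local_size += (size_t) std::min(chunk_size, nelements - first);
        }
    };

    std::vector<std::thread> workers;
    for (int i = 0; i < 4; ++i) {
        workers.emplace_back(compute);
    }
    for (auto & w : workers) {
        w.join();
    }

    std::printf("processed %zu / %d elements\n", new_size, nelements);
    return 0;
}

With a non-static const int and the same capture list, the std::min call in the sketch would be ill-formed, since binding a reference to a local variable odr-uses it, and an odr-used automatic variable must appear in an explicit capture list.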