mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix lambda capture
ggml-ci
parent 93f285bdf1
commit 899f9a5350
@@ -3576,7 +3576,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_data = work.data();
             std::vector<int64_t> hist_cur(1 << 4, 0);
 
-            const int chunk_size = 32 * 512;
+            static const int chunk_size = 32 * 512;
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
             if (nthread_use < 2) {
@@ -3584,7 +3584,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             } else {
                 size_t counter = 0;
                 new_size = 0;
-                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size]() { // NOLINT
+                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
                     std::vector<int64_t> local_hist;
                     size_t local_size = 0;
                     while (true) {
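The fix relies on a standard C++ rule: a variable with static storage duration can be used inside a lambda body without appearing in the capture list, so once chunk_size becomes static const the explicit capture (and the // NOLINT that suppressed a lint about it) is no longer needed. A minimal standalone sketch of that rule, not taken from llama.cpp, with a hypothetical accumulator standing in for the captured state:

#include <cstdio>
#include <thread>

// Standalone sketch (not llama.cpp code): a static const local has static
// storage duration, so the lambda below can read it without capturing it.
int main() {
    static const int chunk_size = 32 * 512;
    long long processed = 0; // hypothetical accumulator; non-static, so it must be captured

    auto compute = [&processed]() {
        processed += chunk_size; // chunk_size used without being captured
    };

    std::thread worker(compute);
    worker.join();

    std::printf("processed %lld elements\n", processed);
    return 0;
}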
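For context on the rest of the capture list, the lambda is a chunked worker: each thread repeatedly claims the next chunk_size-sized range from a shared counter under the mutex, quantizes it, and folds its local result into new_size before exiting. Below is a rough, self-contained sketch of that pattern with assumed sizes and thread counts, and with trivial placeholder work in place of the real per-chunk quantization call:

#include <algorithm>
#include <cstddef>
#include <mutex>
#include <thread>
#include <vector>

// Simplified sketch of the worker pattern around the changed lambda:
// threads claim [first, last) ranges via a shared counter and merge
// their local results under the mutex before exiting.
int main() {
    static const int chunk_size = 32 * 512;  // as in the diff: no capture needed
    const size_t nelements   = 1u << 22;     // hypothetical tensor size
    const int    nthread_use = 4;            // hypothetical thread count

    std::mutex mutex;
    size_t counter  = 0;  // next element index to hand out
    size_t new_size = 0;  // merged result (stands in for the quantized size)

    auto compute = [&mutex, &counter, &new_size, nelements]() {
        size_t local_size = 0;
        while (true) {
            size_t first;
            {
                std::lock_guard<std::mutex> lock(mutex);
                first = counter;
                if (first >= nelements) {
                    new_size += local_size;  // fold the local result in before exiting
                    break;
                }
                counter += chunk_size;
            }
            const size_t last = std::min(first + (size_t) chunk_size, nelements);
            local_size += last - first;      // placeholder for the real quantization work
        }
    };

    std::vector<std::thread> workers;
    for (int i = 0; i < nthread_use - 1; ++i) {
        workers.emplace_back(compute);
    }
    compute();                               // the calling thread works too
    for (auto & w : workers) {
        w.join();
    }

    return new_size == nelements ? 0 : 1;    // sanity check: every element claimed exactly once
}

Handing out work through a shared counter rather than pre-splitting fixed ranges keeps the threads balanced when chunks take different amounts of time, which is presumably why the real code is structured this way.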