Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
llama : refactor quantization to avoid <mutex> header

ggml-ci

commit b5af7ad84f
parent 83e633c27e

 llama.cpp | 73
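This change removes the std::mutex-based work dispatch from llama_model_quantize_internal: instead of worker threads pulling chunk offsets off a shared counter under a lock, each thread tid now processes chunks ch = tid, tid + nthread_use, tid + 2*nthread_use, ... and writes its byte count and histogram into per-thread slots (size_th, hist_cur_th) that are summed after all threads have joined. With no concurrently mutated shared state left, the std::mutex object and the <mutex> include can be dropped. A minimal standalone sketch of the new pattern follows the diff below.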
@@ -68,7 +68,6 @@
 #include <initializer_list>
 #include <map>
 #include <memory>
-#include <mutex>
 #include <numeric>
 #include <queue>
 #include <random>
@@ -9085,7 +9084,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
     std::vector<std::thread> workers;
     workers.reserve(nthread);
-    std::mutex mutex;
 
     int idx = 0;
 
@@ -9159,7 +9157,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_size = ggml_nbytes(tensor);
             LLAMA_LOG_INFO("size = %8.3f MB\n", ggml_nbytes(tensor)/1024.0/1024.0);
         } else {
-            const size_t nelements = ggml_nelements(tensor);
+            const size_t ne = ggml_nelements(tensor);
 
             float * f32_data;
 
@@ -9168,53 +9166,60 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             } else if (ggml_is_quantized(tensor->type) && !params->allow_requantize) {
                 throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor->type)));
             } else {
-                llama_convert_tensor_internal(tensor, f32_conv_buf, workers, nelements, nthread);
+                llama_convert_tensor_internal(tensor, f32_conv_buf, workers, ne, nthread);
                 f32_data = (float *) f32_conv_buf.data();
             }
 
             LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type));
             fflush(stdout);
 
-            if (work.size() < nelements * 4) {
-                work.resize(nelements * 4); // upper bound on size
+            if (work.size() < ne * 4) {
+                work.resize(ne * 4); // upper bound on size
             }
             new_data = work.data();
 
             std::array<int64_t, 1 << 4> hist_cur = {};
 
-            static const int chunk_size = 32 * 512;
-            const int nchunk = (nelements + chunk_size - 1)/chunk_size;
-            const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
-            if (nthread_use < 2) {
-                new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, nelements, hist_cur.data());
-            } else {
-                size_t counter = 0;
-                new_size = 0;
-                auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements]() {
-                    std::array<int64_t, 1 << 4> local_hist = {};
-                    size_t local_size = 0;
-                    while (true) {
-                        std::unique_lock<std::mutex> lock(mutex);
-                        size_t first = counter; counter += chunk_size;
-                        if (first >= nelements) {
-                            if (local_size > 0) {
-                                for (int j=0; j<int(local_hist.size()); ++j) {
-                                    hist_cur[j] += local_hist[j];
-                                }
-                                new_size += local_size;
-                            }
-                            break;
-                        }
-                        lock.unlock();
-                        size_t last = std::min(nelements, first + chunk_size);
-                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
-                    }
-                };
-                for (int it = 0; it < nthread_use - 1; ++it) {
-                    workers.emplace_back(compute);
-                }
-                compute();
-                for (auto & w : workers) { w.join(); }
-                workers.clear();
-            }
+            {
+                static const size_t chunk_size = 32*512;
+
+                const int nchunk = GGML_PAD(ne, chunk_size)/chunk_size;
+                const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
+
+                std::vector<size_t> size_th(nthread_use, 0);
+                std::vector<std::array<int64_t, 1 << 4>> hist_cur_th(nthread_use);
+
+                auto compute = [&size_th, &hist_cur_th, new_type, f32_data, new_data, ne, nchunk, nthread_use](int tid) {
+                    auto & local_size = size_th[tid];
+                    auto & local_hist = hist_cur_th[tid];
+
+                    for (int ch = tid; ch < nchunk; ch += nthread_use) {
+                        const size_t first = ch * chunk_size;
+                        const size_t last = std::min(ne, first + chunk_size);
+
+                        local_size += ggml_quantize_chunk(new_type, f32_data, new_data, first, last - first, local_hist.data());
+                    }
+                };
+
+                for (int it = 0; it < nthread_use - 1; ++it) {
+                    workers.emplace_back(compute, it);
+                }
+                compute(nthread_use - 1);
+
+                for (auto & w : workers) {
+                    w.join();
+                }
+                workers.clear();
+
+                new_size = 0;
+                for (int it = 0; it < nthread_use; ++it) {
+                    for (int j = 0; j < int(hist_cur.size()); ++j) {
+                        hist_cur[j] += hist_cur_th[it][j];
+                    }
+                    new_size += size_th[it];
+                }
+            }
 
             LLAMA_LOG_INFO("size = %8.2f MiB -> %8.2f MiB | hist: ", ggml_nbytes(tensor)/1024.0/1024.0, new_size/1024.0/1024.0);
@@ -9226,7 +9231,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 
             if (tot_count > 0) {
                 for (size_t i = 0; i < hist_cur.size(); i++) {
-                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements));
+                    LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(ne));
                 }
             }
             LLAMA_LOG_INFO("\n");
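For reference, below is a minimal, self-contained sketch of the lock-free pattern introduced by this commit: chunks are assigned to threads by a fixed stride rather than pulled from a mutex-guarded counter, and per-thread totals are reduced only after every worker has joined. The helper quantize_chunk and all constants here are illustrative stand-ins, not llama.cpp API.

// sketch of strided chunk assignment with per-thread accumulation (no mutex needed)
#include <algorithm>
#include <cstddef>
#include <thread>
#include <vector>

// stand-in for ggml_quantize_chunk: pretends to quantize n elements and returns bytes written
static size_t quantize_chunk(const float * /*src*/, size_t /*first*/, size_t n) {
    return n; // pretend 1 byte per element
}

int main() {
    const size_t ne          = 1000000;                // number of tensor elements
    const size_t chunk_size  = 32 * 512;               // elements per work chunk
    const int    nchunk      = (int) ((ne + chunk_size - 1) / chunk_size);
    const int    nthread     = (int) std::max(1u, std::thread::hardware_concurrency());
    const int    nthread_use = std::max(1, std::min(nthread, nchunk));

    std::vector<float>  data(ne, 1.0f);
    std::vector<size_t> size_th(nthread_use, 0);        // per-thread byte counts, no shared mutable state

    // thread tid handles chunks tid, tid + nthread_use, tid + 2*nthread_use, ...
    auto compute = [&](int tid) {
        for (int ch = tid; ch < nchunk; ch += nthread_use) {
            const size_t first = (size_t) ch * chunk_size;
            const size_t last  = std::min(ne, first + chunk_size);
            size_th[tid] += quantize_chunk(data.data(), first, last - first);
        }
    };

    std::vector<std::thread> workers;
    for (int it = 0; it < nthread_use - 1; ++it) {
        workers.emplace_back(compute, it);
    }
    compute(nthread_use - 1); // the calling thread takes the last slot
    for (auto & w : workers) {
        w.join();
    }

    // reduce per-thread results after all threads have joined
    size_t new_size = 0;
    for (int it = 0; it < nthread_use; ++it) {
        new_size += size_th[it];
    }
    return new_size == ne ? 0 : 1;
}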