Slightly faster imatrix (#5050)
* imatrix: speedup by avoiding unnecessary allocations and copies

* imatrix: add --no-ppl option to skip PPL calculations altogether

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
parent 942c0107a7
commit 726c0fa9a2
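The speedup described in the commit message comes from copying the batch logits out of the context only when they will actually feed the perplexity computation, and from reserving the scratch buffer for them once up front instead of reallocating it. Below is a minimal, self-contained sketch of that reserve-once / clear-between-chunks pattern; the names n_chunks, n_ctx, n_vocab and the assign/clear calls are illustrative stand-ins, not the actual imatrix code.

// Sketch only: reserve the scratch buffer once, reuse it across chunks, and
// skip it entirely when perplexity is not wanted. Names are illustrative.
#include <cstdio>
#include <vector>

int main() {
    const int  n_chunks    = 4;
    const int  n_ctx       = 8;
    const int  n_vocab     = 16;
    const bool compute_ppl = true;   // would come from the --no-ppl flag

    std::vector<float> logits;
    if (compute_ppl) {
        logits.reserve((size_t)n_ctx * n_vocab);   // single allocation up front
    }

    for (int i = 0; i < n_chunks; ++i) {
        if (compute_ppl) {
            // stand-in for copying the per-batch logits out of the context
            logits.assign((size_t)n_ctx * n_vocab, 0.0f);
            // ... perplexity bookkeeping would use logits here ...
            logits.clear();   // drops the elements but keeps the capacity
        }
        // importance-matrix statistics are collected regardless of compute_ppl
    }

    printf("processed %d chunks (compute_ppl = %d)\n", n_chunks, (int)compute_ppl);
    return 0;
}

Reserving once and clearing between chunks keeps the vector's capacity, so no per-chunk allocation is paid when PPL is enabled, and no buffer is touched at all when it is disabled.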
@@ -248,7 +248,7 @@ static void process_logits(
     }
 }
 
-static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
+static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool compute_ppl) {
 
     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
     const int n_ctx = llama_n_ctx(ctx);
@@ -269,10 +269,12 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
     }
 
     std::vector<float> logit_history;
-    logit_history.resize(tokens.size());
-
     std::vector<float> prob_history;
-    prob_history.resize(tokens.size());
+
+    if (compute_ppl) {
+        logit_history.resize(tokens.size());
+        prob_history.resize(tokens.size());
+    }
 
     const int n_chunk_max = tokens.size() / n_ctx;
 
@@ -288,12 +290,17 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 
     std::vector<std::thread> workers(std::thread::hardware_concurrency() - 1);
 
+    const int num_batches = (n_ctx + n_batch - 1) / n_batch;
+
+    std::vector<float> logits;
+    if (compute_ppl && num_batches > 1) {
+        logits.reserve((size_t)n_ctx * n_vocab);
+    }
+
     for (int i = 0; i < n_chunk; ++i) {
         const int start =     i * n_ctx;
         const int end   = start + n_ctx;
 
-        const int num_batches = (n_ctx + n_batch - 1) / n_batch;
-
         std::vector<float> logits;
 
         const auto t_start = std::chrono::high_resolution_clock::now();
@@ -321,8 +328,10 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
             // restore the original token in case it was set to BOS
             tokens[batch_start] = token_org;
 
-            const auto * batch_logits = llama_get_logits(ctx);
-            logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
+            if (compute_ppl && num_batches > 1) {
+                const auto * batch_logits = llama_get_logits(ctx);
+                logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
+            }
         }
 
         const auto t_end = std::chrono::high_resolution_clock::now();
@@ -338,25 +347,32 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
             fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
         }
 
-        const int first = n_ctx/2;
-        process_logits(n_vocab, logits.data() + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
-                workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
-        count += n_ctx - first - 1;
+        if (compute_ppl) {
+            const int first = n_ctx/2;
+            const auto all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
+            process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
+                    workers, nll, nll2, logit_history.data() + start + first, prob_history.data() + start + first);
+            count += n_ctx - first - 1;
 
-        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
-        fflush(stdout);
+            printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+            fflush(stdout);
+
+            logits.clear();
+        }
     }
     printf("\n");
 
-    nll2 /= count;
-    nll /= count;
-    const double ppl = exp(nll);
-    nll2 -= nll * nll;
-    if (nll2 > 0) {
-        nll2 = sqrt(nll2/(count-1));
-        printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
-    } else {
-        printf("Unexpected negative standard deviation of log(prob)\n");
+    if (compute_ppl) {
+        nll2 /= count;
+        nll /= count;
+        const double ppl = exp(nll);
+        nll2 -= nll * nll;
+        if (nll2 > 0) {
+            nll2 = sqrt(nll2/(count-1));
+            printf("Final estimate: PPL = %.4lf +/- %.5lf\n", ppl, nll2*ppl);
+        } else {
+            printf("Unexpected negative standard deviation of log(prob)\n");
+        }
     }
 
     return true;
@@ -365,6 +381,7 @@ static bool compute_imatrix(llama_context * ctx, const gpt_params & params) {
 int main(int argc, char ** argv) {
 
     StatParams sparams;
+    bool compute_ppl = true;
     std::vector<char*> args;
     args.push_back(argv[0]);
     int iarg = 1;
@@ -381,12 +398,19 @@ int main(int argc, char ** argv) {
         }
         else if (arg == "--verbosity") {
            sparams.verbosity = std::stoi(argv[++iarg]);
+        } else if (arg == "--no-ppl") {
+            compute_ppl = false;
         } else {
             args.push_back(argv[iarg]);
         }
     }
     if (iarg < argc) {
-        args.push_back(argv[iarg]);
+        std::string arg{argv[iarg]};
+        if (arg == "--no-ppl") {
+            compute_ppl = false;
+        } else {
+            args.push_back(argv[iarg]);
+        }
     }
 
     gpt_params params;
@@ -448,7 +472,7 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "%s\n", get_system_info(params).c_str());
     }
 
-    bool OK = compute_imatrix(ctx, params);
+    bool OK = compute_imatrix(ctx, params, compute_ppl);
     if (!OK) {
         return 1;
     }
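On the command-line side, the hunks above consume --no-ppl in the example's own argument loop before the remaining arguments are handed to the common gpt_params parsing. A simplified, stand-alone sketch of that filtering pattern follows; only the option name --no-ppl and the compute_ppl flag are taken from the diff, the rest is illustrative.

// Sketch only: consume options the example handles itself (here --no-ppl)
// and forward everything else to the common argument parser.
#include <cstdio>
#include <string>
#include <vector>

int main(int argc, char ** argv) {
    bool compute_ppl = true;
    std::vector<char *> args;
    args.push_back(argv[0]);

    for (int iarg = 1; iarg < argc; ++iarg) {
        std::string arg{argv[iarg]};
        if (arg == "--no-ppl") {
            compute_ppl = false;        // skip perplexity bookkeeping later on
        } else {
            args.push_back(argv[iarg]); // left for the common argument parser
        }
    }

    printf("compute_ppl = %d, forwarded %zu argument(s)\n", (int)compute_ppl, args.size());
    return 0;
}

With the flag set, compute_imatrix(ctx, params, compute_ppl) receives compute_ppl == false, so the importance-matrix statistics are still collected while the perplexity bookkeeping and the final PPL estimate are skipped.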