Move llama_context setup + perplexity back to main.cpp
Signed-off-by: Thiago Padilha <thiago@padilha.cc>
This commit is contained in:
parent d7d53b84db
commit b7f1fa6d8c
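
Note: run.h itself is not part of this diff. Judging from the new definition in run.cpp and the call run(ctx, params) in main.cpp, its declaration presumably changes along these lines (an inferred sketch, not the actual header):

// run.h -- inferred sketch, not shown in this commit
#include "utils.h"   // assumed home of gpt_params
#include "llama.h"   // llama_context

int run(llama_context * ctx, gpt_params params);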
main.cpp (124 changed lines)
@@ -1,5 +1,127 @@
#include "run.h"
#include "ggml.h"

std::vector<double> softmax(const std::vector<float>& logits) {
    std::vector<double> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        float logit = logits[i] - max_logit;
        double exp_logit = std::exp(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}

void perplexity(llama_context * ctx, const gpt_params & params) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    auto tokens = ::llama_tokenize(ctx, params.prompt, true);

    int count = 0;
    double nll = 0.0;
    int seq_count = tokens.size() / params.n_ctx;

    fprintf(stderr, "%s : calculating perplexity over %d chunks\n", __func__, seq_count);

    for (int i = 0; i < seq_count; ++i) {
        int start = i * params.n_ctx;
        int end = start + params.n_ctx - 1;
        std::vector<llama_token> embd(tokens.begin() + start, tokens.begin() + end);
        auto start_t = std::chrono::high_resolution_clock::now();
        if (llama_eval(ctx, embd.data(), embd.size(), 0, params.n_threads)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return;
        }
        auto end_t = std::chrono::high_resolution_clock::now();
        if (i == 0) {
            double seconds = std::chrono::duration<double>(end_t - start_t).count();
            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
        }
        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
        // calculate the perplexity over the last half the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // Example, we have a context window of 512, we will compute perplexity for each of the
        // last 256 tokens. Then, we split the input up into context window size chunks to
        // process the entire prompt.

        auto logits = llama_get_logits(ctx);
        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            int n_vocab = llama_n_vocab(ctx);
            std::vector<float> tok_logits(
                logits + j * n_vocab,
                logits + (j + 1) * n_vocab);
            double prob = softmax(tok_logits)[tokens[start + j + 1]];
            nll += -std::log(prob);
            ++count;
        }
        // perplexity is e^(average negative log-likelihood)
        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        fflush(stdout);
    }
    printf("\n");
}

int main(int argc, char ** argv) {
    return run(argc, argv);
    // has to be called once at the start of the program to init ggml stuff
    ggml_time_init();

    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
                "expect poor results\n", __func__, params.n_ctx);
    }

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        lparams.logits_all = params.perplexity;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    if (params.perplexity) {
        perplexity(ctx, params);
        exit(0);
    }

    return run(ctx, params);
}
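For context on what the new perplexity() in main.cpp computes per token: a numerically stable softmax over the vocabulary logits, then the negative log of the probability assigned to the actual next token; the reported perplexity is exp of the average of those values. A minimal standalone sketch with made-up numbers (not part of the commit):

// standalone illustration of the softmax + NLL step used by perplexity()
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<float> logits = {2.0f, 1.0f, 0.5f, -1.0f}; // made-up logits for one position
    int target = 0;                                        // made-up id of the actual next token

    // softmax with max-subtraction for numerical stability, as in the new main.cpp
    float max_logit = *std::max_element(logits.begin(), logits.end());
    std::vector<double> probs(logits.size());
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        probs[i] = std::exp(logits[i] - max_logit);
        sum_exp += probs[i];
    }
    for (double & p : probs) p /= sum_exp;

    // one token's negative log-likelihood; perplexity = exp(average NLL over all scored tokens)
    double nll = -std::log(probs[target]);
    printf("p = %.4f  nll = %.4f  ppl(1 token) = %.4f\n", probs[target], nll, std::exp(nll));
    return 0;
}

The max-subtraction does not change the result mathematically; it only keeps std::exp from overflowing when logits are large.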
run.cpp (122 changed lines)
@@ -1,5 +1,4 @@
#include "utils.h"
#include "ggml.h"
#include "llama.h"

#include <cassert>
@@ -65,79 +64,6 @@ void set_console_state(console_state new_st)
    }
}

std::vector<double> softmax(const std::vector<float>& logits) {
    std::vector<double> probs(logits.size());
    float max_logit = logits[0];
    for (float v : logits) max_logit = std::max(max_logit, v);
    double sum_exp = 0.0;
    for (size_t i = 0; i < logits.size(); i++) {
        // Subtract the maximum logit value from the current logit value for numerical stability
        float logit = logits[i] - max_logit;
        double exp_logit = std::exp(logit);
        sum_exp += exp_logit;
        probs[i] = exp_logit;
    }
    for (size_t i = 0; i < probs.size(); i++) probs[i] /= sum_exp;
    return probs;
}

void perplexity(llama_context * ctx, const gpt_params & params) {
    // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
    // Run `./main --perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
    // Output: `perplexity: 13.5106 [114/114]`
    auto tokens = ::llama_tokenize(ctx, params.prompt, true);

    int count = 0;
    double nll = 0.0;
    int seq_count = tokens.size() / params.n_ctx;

    fprintf(stderr, "%s : calculating perplexity over %d chunks\n", __func__, seq_count);

    for (int i = 0; i < seq_count; ++i) {
        int start = i * params.n_ctx;
        int end = start + params.n_ctx - 1;
        std::vector<llama_token> embd(tokens.begin() + start, tokens.begin() + end);
        auto start_t = std::chrono::high_resolution_clock::now();
        if (llama_eval(ctx, embd.data(), embd.size(), 0, params.n_threads)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return;
        }
        auto end_t = std::chrono::high_resolution_clock::now();
        if (i == 0) {
            double seconds = std::chrono::duration<double>(end_t - start_t).count();
            printf("%.2f seconds per pass - ETA %.2f hours\n", seconds, (seconds * seq_count) / (60.0*60.0));
        }
        // We get the logits for all the tokens in the context window (params.n_ctx)
        // from llama_eval above. Now, based on https://huggingface.co/docs/transformers/perplexity,
        // calculate the perplexity over the last half the window (so the model always has
        // some context to predict the token).
        //
        // We rely on the fact that attention in the forward pass only looks at previous
        // tokens here, so the logits returned for each token are an accurate representation
        // of what the model would have predicted at that point.
        //
        // Example, we have a context window of 512, we will compute perplexity for each of the
        // last 256 tokens. Then, we split the input up into context window size chunks to
        // process the entire prompt.

        auto logits = llama_get_logits(ctx);
        for (int j = params.n_ctx / 2; j < params.n_ctx - 1; ++j) {
            // Calculate probability of next token, given the previous ones.
            int n_vocab = llama_n_vocab(ctx);
            std::vector<float> tok_logits(
                logits + j * n_vocab,
                logits + (j + 1) * n_vocab);
            double prob = softmax(tok_logits)[tokens[start + j + 1]];
            nll += -std::log(prob);
            ++count;
        }
        // perplexity is e^(average negative log-likelihood)
        printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
        fflush(stdout);
    }
    printf("\n");
}

static bool is_interacting = false;

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
@@ -154,21 +80,7 @@ void sigint_handler(int signo) {
}
#endif

int run(int argc, char ** argv) {
    // has to be called once at the start of the program to init ggml stuff
    ggml_time_init();

    gpt_params params;
    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
                "expect poor results\n", __func__, params.n_ctx);
    }
int run(llama_context * ctx, gpt_params params) {

    if (params.seed <= 0) {
        params.seed = time(NULL);
@@ -188,33 +100,6 @@ int run(int argc, char ** argv) {
    // params.prompt = R"(// this function checks if the number n is prime
    //bool is_prime(int n) {)";

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        lparams.logits_all = params.perplexity;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    // determine the required inference memory per token:
    // TODO: better way to do that
    {
@@ -222,11 +107,6 @@ int run(int argc, char ** argv) {
        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
    }

    if (params.perplexity) {
        perplexity(ctx, params);
        exit(0);
    }

    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
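Reading the two diffs together, the perplexity pass now lives entirely in main.cpp, while run() receives an already-initialized llama_context. As a quick sanity check on the chunking it performs (illustrative numbers only, not from this diff): with params.n_ctx = 512 and, say, a 65,536-token input, seq_count = 65536 / 512 = 128 chunks; each chunk scores j = 256 .. 510, i.e. 255 next-token predictions, so the final figure averages the negative log-likelihood over 128 * 255 = 32,640 tokens before taking exp().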