#include "common.h"
#include "llama.h"
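
// This example tokenizes a prompt, evaluates it with a LLaMA model and prints
// the resulting embedding vector (n_embd floats) to stdout.
//
// Hypothetical invocation (assuming the binary is built as `embedding` and the
// usual common.cpp command-line flags are available):
//   ./embedding -m models/llama-7B/ggml-model.bin -p "some text"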

int main(int argc, char ** argv) {
    gpt_params params;

    params.model = "models/llama-7B/ggml-model.bin";

    if (gpt_params_parse(argc, argv, params) == false) {
        return 1;
    }
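
    // this example always computes embeddings, regardless of the command-line setting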
    params.embedding = true;

    if (params.n_ctx > 2048) {
        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
                " expect poor results\n", __func__, params.n_ctx);
    }

    if (params.seed <= 0) {
        params.seed = time(NULL);
    }

    fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    llama_context * ctx;

    // load the model
    {
        auto lparams = llama_context_default_params();

        lparams.n_ctx      = params.n_ctx;
        lparams.n_parts    = params.n_parts;
        lparams.seed       = params.seed;
        lparams.f16_kv     = params.memory_f16;
        lparams.logits_all = params.perplexity;
        lparams.use_mmap   = params.use_mmap;
        lparams.use_mlock  = params.use_mlock;
        lparams.embedding  = params.embedding;

        ctx = llama_init_from_file(params.model.c_str(), lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, params.model.c_str());
            return 1;
        }
    }

    // print system information
    {
        fprintf(stderr, "\n");
        fprintf(stderr, "system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    int n_past = 0;

    // Add a space in front of the first character to match OG llama tokenizer behavior
    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    auto embd_inp = ::llama_tokenize(ctx, params.prompt, true);

    // determine newline token (currently unused in this example)
    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);

    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
        }
        fprintf(stderr, "\n");
    }

    if (params.embedding) {
        if (embd_inp.size() > 0) {
            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
                fprintf(stderr, "%s : failed to eval\n", __func__);
                return 1;
            }
        }
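
        // print the embedding returned for the evaluated prompt (n_embd values)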
        const int n_embd = llama_n_embd(ctx);
        const auto embeddings = llama_get_embeddings(ctx);

        for (int i = 0; i < n_embd; i++) {
            printf("%f ", embeddings[i]);
        }
        printf("\n");
    }

    llama_print_timings(ctx);
    llama_free(ctx);

    return 0;
}