Mirror of https://github.com/ggerganov/llama.cpp.git
Commit cb6c44c5e0:

* Do not use _GNU_SOURCE gratuitously. Everything needed to build llama.cpp and the examples is defined in The Open Group Base Specifications Issue 6 (https://pubs.opengroup.org/onlinepubs/009695399/), also known as the Single Unix Specification v3 (SUSv3) or POSIX.1-2001 + XSI extensions, plus some BSD functionality that POSIX.1 does not specify. That was true until NUMA support was added recently, so GNU libc extensions are now enabled for Linux builds to cover it. Keeping feature test macros out of the source code gives greater flexibility to anyone reusing it in a third-party app: they can build with the FTMs set by the Makefile here, or with other FTMs that suit their needs. It builds without issues on Alpine (musl libc), Ubuntu (glibc), and MSYS2.
* make : enable Darwin extensions for macOS to expose RLIMIT_MEMLOCK
* make : enable BSD extensions for DragonFlyBSD to expose RLIMIT_MEMLOCK
* make : use BSD-specific FTMs to enable alloca on BSDs
* make : fix OpenBSD build by exposing newer POSIX definitions
* cmake : follow recent FTM improvements from Makefile
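For illustration only, the feature test macros named in this commit message correspond to preprocessor definitions like the ones below when a build system passes them before any header is included; the exact values are assumptions drawn from the commit text, not the project's actual build lines.

    // hypothetical equivalents of build-system flags such as -D_XOPEN_SOURCE=600
    #define _XOPEN_SOURCE 600    // SUSv3: POSIX.1-2001 plus XSI extensions
    #define _GNU_SOURCE          // Linux builds only: glibc extensions (NUMA support)
    #define _DARWIN_C_SOURCE     // macOS builds only: Darwin extensions such as RLIMIT_MEMLOCK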
876 lines
34 KiB
C++
#include "common.h"
|
|
|
|
#include "console.h"
|
|
#include "llama.h"
|
|
#include "build-info.h"
|
|
#include "grammar-parser.h"
|
|
|
|
#include <algorithm> // std::fill, std::max, std::min
#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

static llama_context ** g_ctx;
static llama_model ** g_model;
static gpt_params * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream * g_output_ss;
static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;

void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = get_sortable_timestamp();

    const bool success = create_directory_with_parents(params.logdir);
    if (!success) {
        fprintf(stderr, "%s: warning: failed to create logdir %s, cannot write logfile\n",
                __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        fprintf(stderr, "%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: main\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    dump_non_result_info_yaml(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    dump_string_yaml_multiline(logfile, "output", output.c_str());
    dump_vector_int_yaml(logfile, "output_tokens", output_tokens);

    llama_dump_timing_info_yaml(logfile, ctx);
    fclose(logfile);
}

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            console::cleanup();
            printf("\n");
            llama_print_timings(*g_ctx);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);
            _exit(130);
        }
    }
}
#endif

int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("main", "log"));
    LOG_TEE("Log start\n");
    log_dump_cmdline(argc, argv);
#endif // LOG_DISABLE_LOGS

    // TODO: Dump params ?
    //LOG("Params perplexity: %s\n", LOG_TOSTR(params.perplexity));

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.perplexity) {
        printf("\n************\n");
        printf("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.embedding) {
        printf("\n************\n");
        printf("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        printf("************\n\n");

        return 0;
    }

    if (params.rope_freq_base != 10000.0) {
        LOG_TEE("%s: warning: changing RoPE frequency base to %g (default 10000.0)\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 1.0) {
        LOG_TEE("%s: warning: scaling RoPE frequency by %g (default 1.0)\n", __func__, params.rope_freq_scale);
    }

    LOG_TEE("%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

    if (params.seed == LLAMA_DEFAULT_SEED) {
        params.seed = time(NULL);
    }

    LOG_TEE("%s: seed = %u\n", __func__, params.seed);

    std::mt19937 rng(params.seed);
    if (params.random_prompt) {
        params.prompt = gpt_random_prompt(rng);
    }

    LOG("%s: llama backend init\n", __func__);
    llama_backend_init(params.numa);

    llama_model * model;
    llama_context * ctx;
    llama_context * ctx_guidance = NULL;
    g_model = &model;
    g_ctx = &ctx;

    // load the model and apply lora adapter, if any
    LOG("%s: load the model and apply lora adapter, if any\n", __func__);
    std::tie(model, ctx) = llama_init_from_gpt_params(params);
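    // a second llama_context is created only when classifier-free guidance is
    // requested (cfg_scale > 1), so the negative prompt can be evaluated with its
    // own KV cache, separate from the main context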
    if (params.cfg_scale > 1.f) {
        struct llama_context_params lparams = llama_context_params_from_gpt_params(params);
        ctx_guidance = llama_new_context_with_model(model, lparams);
    }

    if (model == NULL) {
        LOG_TEE("%s: error: unable to load model\n", __func__);
        return 1;
    }

    if (params.n_ctx > llama_n_ctx(ctx)) {
        LOG_TEE("%s: warning: base model only supports context sizes no greater than %d tokens (%d specified)\n", __func__, llama_n_ctx(ctx), params.n_ctx);
    } else if (params.n_ctx < 8) {
        LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    // print system information
    {
        LOG_TEE("\n");
        LOG_TEE("system_info: n_threads = %d / %d | %s\n",
                params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
    }

    // determine the maximum memory usage needed to do inference for the given n_batch and n_ctx parameters
    // uncomment the "used_mem" line in llama.cpp to see the results
    if (params.mem_test) {
        {
            LOG_TEE("%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx);

            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos(ctx));
            llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads);
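            // note: a full batch of BOS tokens evaluated with n_past = n_ctx exercises
            // the worst-case memory usage for the chosen n_batch and n_ctx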
        }

        llama_print_timings(ctx);
        llama_free(ctx);
        llama_free_model(model);

        return 0;
    }

    // export the cgraph and exit
    if (params.export_cgraph) {
        llama_eval_export(ctx, "llama.ggml");
        llama_free(ctx);
        llama_free_model(model);

        return 0;
    }

    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        LOG_TEE("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());

        // fopen to check for existing session
        FILE * fp = std::fopen(path_session.c_str(), "rb");
        if (fp != NULL) {
            std::fclose(fp);

            session_tokens.resize(params.n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_load_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_TEE("%s: error: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            llama_set_rng_seed(ctx, params.seed);

            LOG_TEE("%s: loaded a session with prompt size of %d tokens\n", __func__, (int) session_tokens.size());
        } else {
            LOG_TEE("%s: session file does not exist, will create\n", __func__);
        }
    }

    const bool add_bos = llama_vocab_type(ctx) == LLAMA_VOCAB_TYPE_SPM;
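    // SPM (SentencePiece / LLaMA-style) vocabularies expect a leading BOS token,
    // while BPE vocabularies do not, so BOS insertion is keyed off the vocab type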
    LOG("add_bos: %d\n", add_bos);

    std::vector<llama_token> embd_inp;

    if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) {
        LOG("tokenize the prompt\n");
        embd_inp = ::llama_tokenize(ctx, params.prompt, add_bos);
    } else {
        LOG("use session tokens\n");
        embd_inp = session_tokens;
    }

    LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
    LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(ctx));
        LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp));
    }

    // Tokenize negative prompt
    std::vector<llama_token> guidance_inp;
    int guidance_offset = 0;
    int original_prompt_len = 0;
    if (ctx_guidance) {
        LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(params.cfg_negative_prompt));

        guidance_inp = ::llama_tokenize(ctx_guidance, params.cfg_negative_prompt, add_bos);
        LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp));

        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos);
        LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp));

        original_prompt_len = original_inp.size();
        guidance_offset = (int)guidance_inp.size() - original_prompt_len;
        LOG("original_prompt_len: %s", log_tostr(original_prompt_len));
        LOG("guidance_offset: %s", log_tostr(guidance_offset));
    }

    const int n_ctx = llama_n_ctx(ctx);
    LOG("n_ctx: %d\n", n_ctx);

    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_TEE("%s: error: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // debug message about similarity of saved session, if applicable
    size_t n_matching_session_tokens = 0;
    if (!session_tokens.empty()) {
        for (llama_token id : session_tokens) {
            if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                break;
            }
            n_matching_session_tokens++;
        }
        if (params.prompt.empty() && n_matching_session_tokens == embd_inp.size()) {
            LOG_TEE("%s: using full prompt from session file\n", __func__);
        } else if (n_matching_session_tokens >= embd_inp.size()) {
            LOG_TEE("%s: session file has exact match for prompt!\n", __func__);
        } else if (n_matching_session_tokens < (embd_inp.size() / 2)) {
            LOG_TEE("%s: warning: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        } else {
            LOG_TEE("%s: session file matches %zu / %zu tokens of prompt\n",
                __func__, n_matching_session_tokens, embd_inp.size());
        }
    }

    LOGLN(
            "recalculate the cached logits (check): embd_inp.empty() %s, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu, embd_inp.size() %zu",
            log_tostr(embd_inp.empty()), n_matching_session_tokens, embd_inp.size(), session_tokens.size(), embd_inp.size());

    // if we will use the cache for the full prompt without reaching the end of the cache, force
    // reevaluation of the last token to recalculate the cached logits
    if (!embd_inp.empty() && n_matching_session_tokens == embd_inp.size() && session_tokens.size() > embd_inp.size()) {
        LOGLN("recalculate the cached logits (do): session_tokens.resize( %zu )", embd_inp.size() - 1);

        session_tokens.resize(embd_inp.size() - 1);
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size() || params.instruct) {
        params.n_keep = (int)embd_inp.size();
    }

    // prefix & suffix for instruct mode
    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", add_bos);
    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false);
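    // note: add_bos applies only to the instruction prefix; the response suffix is
    // tokenized without BOS because it is appended mid-stream after the user input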

    LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx));
    LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx));

    // in instruct mode, we inject a prefix and a suffix to each input by the user
    if (params.instruct) {
        params.interactive_first = true;
        params.antiprompt.push_back("### Instruction:\n\n");
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_TEE("\n");
        LOG_TEE("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_TEE("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_TEE("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (ctx_guidance) {
            LOG_TEE("\n");
            LOG_TEE("%s: negative prompt: '%s'\n", __func__, params.cfg_negative_prompt.c_str());
            LOG_TEE("%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
                LOG_TEE("%6d -> '%s'\n", guidance_inp[i], llama_token_to_piece(ctx, guidance_inp[i]).c_str());
            }
        }

        if (params.n_keep > 0) {
            LOG_TEE("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG_TEE("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG_TEE("'\n");
        }
        LOG_TEE("\n");
    }

    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        LOG_TEE("%s: interactive mode on.\n", __func__);

        if (!params.antiprompt.empty()) {
            for (const auto & antiprompt : params.antiprompt) {
                LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
            }
        }

        if (params.input_prefix_bos) {
            LOG_TEE("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_TEE("Input prefix: '%s'\n", params.input_prefix.c_str());
        }

        if (!params.input_suffix.empty()) {
            LOG_TEE("Input suffix: '%s'\n", params.input_suffix.c_str());
        }
    }
    LOG_TEE("sampling: repeat_last_n = %d, repeat_penalty = %f, presence_penalty = %f, frequency_penalty = %f, top_k = %d, tfs_z = %f, top_p = %f, typical_p = %f, temp = %f, mirostat = %d, mirostat_lr = %f, mirostat_ent = %f\n",
            params.repeat_last_n, params.repeat_penalty, params.presence_penalty, params.frequency_penalty, params.top_k, params.tfs_z, params.top_p, params.typical_p, params.temp, params.mirostat, params.mirostat_eta, params.mirostat_tau);
    LOG_TEE("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);
    LOG_TEE("\n\n");

    struct llama_grammar * grammar = NULL;
    grammar_parser::parse_state parsed_grammar;

    if (!params.grammar.empty()) {
        parsed_grammar = grammar_parser::parse(params.grammar.c_str());
        // will be empty (default) if there are parse errors
        if (parsed_grammar.rules.empty()) {
            return 1;
        }
        LOG_TEE("%s: grammar:\n", __func__);
        grammar_parser::print_grammar(stderr, parsed_grammar);
        LOG_TEE("\n");

        {
            auto it = params.logit_bias.find(llama_token_eos(ctx));
            if (it != params.logit_bias.end() && it->second == -INFINITY) {
                LOG_TEE("%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__);
            }
        }

        std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
        grammar = llama_grammar_init(
            grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
    }

    // TODO: replace with ring-buffer
    std::vector<llama_token> last_tokens(n_ctx);
    std::fill(last_tokens.begin(), last_tokens.end(), 0);
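    // last_tokens is a fixed-size sliding window over the most recent n_ctx tokens;
    // it feeds the repetition penalties and the reverse-prompt check further below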

    if (params.interactive) {
        const char *control_message;
        if (params.multiline_input) {
            control_message = " - To return control to LLaMa, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to LLaMa.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG_TEE("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG_TEE( " - Press Ctrl+C to interject at any time.\n");
#endif
        LOG_TEE( "%s\n", control_message);

        is_interacting = params.interactive_first;
    }

    bool is_antiprompt = false;
    bool input_echo = true;
    bool need_to_save_session = !path_session.empty() && n_matching_session_tokens < embd_inp.size();

    int n_past = 0;
    int n_remain = params.n_predict;
    int n_consumed = 0;
    int n_session_consumed = 0;
    int n_past_guidance = 0;

    std::vector<int> input_tokens; g_input_tokens = &input_tokens;
    std::vector<int> output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss; g_output_ss = &output_ss;

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);

    std::vector<llama_token> embd;
    std::vector<llama_token> embd_guidance;

    const int n_vocab = llama_n_vocab(ctx);

    std::vector<llama_token_data> candidates;
    candidates.reserve(n_vocab);

    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                printf("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
                fflush(stdout);
            }

            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + (int) embd.size() + std::max<int>(0, guidance_offset) > n_ctx) {
                if (params.n_predict == -2) {
LOG_TEE("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
|
|
                    break;
                }

                const int n_left = n_past - params.n_keep;
                LOG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d\n", n_past, n_left, n_ctx, params.n_keep);

                // always keep the first token - BOS
                n_past = std::max(1, params.n_keep);
                n_past_guidance = std::max(1, params.n_keep + guidance_offset);

                LOG("after swap: n_past = %d, n_past_guidance = %d\n", n_past, n_past_guidance);

                // insert n_left/2 tokens at the start of embd from last_tokens
                embd.insert(embd.begin(), last_tokens.begin() + n_ctx - n_left/2 - embd.size(), last_tokens.end() - embd.size());
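                // the copied range has length n_left/2: the most recent tokens that precede
                // the ones still queued in embd, so roughly half of the evicted window is
                // re-evaluated on top of the kept n_keep prefix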

                LOG("embd: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));

                LOG("clear session path\n");
                path_session.clear();
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
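                    // tokens matched against the session cache were already counted via
                    // n_past above, so only the unmatched tail is left for llama_eval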
                }
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always

            if (ctx_guidance) {
                int input_size = 0;
                llama_token * input_buf = NULL;

                if (n_past_guidance < (int) guidance_inp.size()) {
                    // Guidance context should have the same data with these modifications:
                    //
                    // * Replace the initial prompt
                    // * Shift everything by guidance_offset
                    embd_guidance = guidance_inp;
                    if (embd.begin() + original_prompt_len < embd.end()) {
                        embd_guidance.insert(
                            embd_guidance.end(),
                            embd.begin() + original_prompt_len,
                            embd.end()
                        );
                    }

                    input_buf = embd_guidance.data();
                    input_size = embd_guidance.size();

                    LOG("guidance context: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_guidance));
                } else {
                    input_buf = embd.data();
                    input_size = embd.size();
                }

                for (int i = 0; i < input_size; i += params.n_batch) {
                    int n_eval = std::min(input_size - i, params.n_batch);
                    if (llama_eval(ctx_guidance, input_buf + i, n_eval, n_past_guidance, params.n_threads)) {
                        LOG_TEE("%s : failed to eval\n", __func__);
                        return 1;
                    }

                    n_past_guidance += n_eval;
                }
            }

            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG("eval: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd));

                if (llama_eval(ctx, &embd[i], n_eval, n_past, params.n_threads)) {
                    LOG_TEE("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG("n_past = %d\n", n_past);
            }

            if (!embd.empty() && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();
        embd_guidance.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // optionally save the session on first sample (for faster prompt loading next time)
            if (!path_session.empty() && need_to_save_session && !params.prompt_cache_ro) {
                need_to_save_session = false;
                llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());

                LOG("saved session to %s\n", path_session.c_str());
            }

            const llama_token id = llama_sample_token(ctx, ctx_guidance, grammar, params, last_tokens, candidates);
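            // llama_sample_token applies the configured sampling chain here: logit biases,
            // the optional grammar and CFG constraints, repetition penalties, and finally
            // temperature sampling (top-k/top-p/tfs/typical) or mirostat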

            last_tokens.erase(last_tokens.begin());
            last_tokens.push_back(id);

            LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, last_tokens));

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);
                last_tokens.erase(last_tokens.begin());
                last_tokens.push_back(embd_inp[n_consumed]);
                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id);
                printf("%s", token_str.c_str());

                if (embd.size() > 1) {
                    input_tokens.push_back(id);
                } else {
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }
            }
            fflush(stdout);
        }
        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
        }

        // if not currently processing queued inputs;
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt
            if (!params.antiprompt.empty()) {
                std::string last_output;
                for (auto id : last_tokens) {
                    last_output += llama_token_to_piece(ctx, id);
                }

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                            console::set_display(console::user_input);
                        }
                        is_antiprompt = true;
                        fflush(stdout);
                        break;
                    }
                }

                if (is_antiprompt) {
                    LOG("found antiprompt: %s\n", last_output.c_str());
                }
            }

            // deal with end of text token in interactive mode
            if (last_tokens.back() == llama_token_eos(ctx)) {
                LOG("found EOS token\n");

                if (params.interactive) {
                    if (!params.antiprompt.empty()) {
                        // tokenize and inject first reverse prompt
                        const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                        is_antiprompt = true;
                    }

                    is_interacting = true;
                    printf("\n");
                    console::set_display(console::user_input);
                    fflush(stdout);
                } else if (params.instruct) {
                    is_interacting = true;
                }
            }

            if (n_past > 0 && is_interacting) {
                LOG("waiting for user input\n");

                if (params.instruct) {
                    printf("\n> ");
                }

                if (params.input_prefix_bos) {
                    LOG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(ctx));
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    LOG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    buffer += params.input_prefix;
                    printf("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
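                    // a length of 1 means the buffer holds only the line terminator,
                    // i.e. the user submitted an empty line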
                    // append input suffix if any
                    if (!params.input_suffix.empty()) {
                        LOG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        buffer += params.input_suffix;
                        printf("%s", params.input_suffix.c_str());
                    }

                    LOG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    // instruct mode: insert instruction prefix
                    if (params.instruct && !is_antiprompt) {
                        LOG("inserting instruction prefix\n");
                        n_consumed = embd_inp.size();
                        embd_inp.insert(embd_inp.end(), inp_pfx.begin(), inp_pfx.end());
                    }

                    const auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp));

                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    // instruct mode: insert response suffix
                    if (params.instruct) {
                        LOG("inserting instruction suffix\n");
                        embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
                    }

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    n_remain -= line_inp.size();
                    LOG("n_remain: %d\n", n_remain);
                } else {
                    LOG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    // reset grammar state if we're restarting generation
                    if (grammar != NULL) {
                        llama_grammar_free(grammar);

                        std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
                        grammar = llama_grammar_init(
                            grammar_rules.data(), grammar_rules.size(),
                            parsed_grammar.symbol_ids.at("root"));
                    }
                }
                is_interacting = false;
            }
        }

        // end of text token
        if (!embd.empty() && embd.back() == llama_token_eos(ctx) && !(params.instruct || params.interactive)) {
            LOG_TEE(" [end of text]\n");
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
        LOG_TEE("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_save_session_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    llama_print_timings(ctx);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    if (ctx_guidance) { llama_free(ctx_guidance); }
    llama_free(ctx);
    llama_free_model(model);

    if (grammar != NULL) {
        llama_grammar_free(grammar);
    }
    llama_backend_free();

#ifndef LOG_DISABLE_LOGS
LOG_TEE("Log end\n")
|
|
#endif // LOG_DISABLE_LOGS

    return 0;
}