#include "arg.h"
#include "common.h"
#include "console.h"
#include "sampling.h"
#include "log.h"
#include "llama.h"

#include <cassert>
#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif
static llama_context           ** g_ctx;
static llama_model             ** g_model;
static gpt_sampler             ** g_smpl;
static gpt_params               * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream       * g_output_ss;
static std::vector<llama_token> * g_output_tokens;

static bool is_interacting = false;
static void write_logfile(
    const llama_context * ctx, const gpt_params & params, const llama_model * model,
    const std::vector<llama_token> & input_tokens, const std::string & output,
    const std::vector<llama_token> & output_tokens
) {
    if (params.logdir.empty()) {
        return;
    }

    const std::string timestamp = string_get_sortable_timestamp();

    const bool success = fs_create_directory_with_parents(params.logdir);
    if (!success) {
        LOG_ERR("%s: warning: failed to create logdir %s, cannot write logfile\n",
                __func__, params.logdir.c_str());
        return;
    }

    const std::string logfile_path = params.logdir + timestamp + ".yml";
    FILE * logfile = fopen(logfile_path.c_str(), "w");

    if (logfile == NULL) {
        LOG_ERR("%s: failed to open logfile %s\n", __func__, logfile_path.c_str());
        return;
    }

    fprintf(logfile, "binary: infill\n");
    char model_desc[128];
    llama_model_desc(model, model_desc, sizeof(model_desc));
    yaml_dump_non_result_info(logfile, params, ctx, timestamp, input_tokens, model_desc);

    fprintf(logfile, "\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "# Generation Results #\n");
    fprintf(logfile, "######################\n");
    fprintf(logfile, "\n");

    yaml_dump_string_multiline(logfile, "output", output.c_str());
    yaml_dump_vector_int(logfile, "output_tokens", output_tokens);

    llama_perf_dump_yaml(logfile, ctx);
    fclose(logfile);
}
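
// SIGINT handler: the first Ctrl+C requests a switch to interactive input,
// a second Ctrl+C prints performance stats, writes the logfile and exits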
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (!is_interacting) {
            is_interacting = true;
        } else {
            console::cleanup();
            LOG("\n");
            gpt_perf_print(*g_ctx, *g_smpl);
            write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens);

            // make sure all logs are flushed
            LOG("Interrupted by user\n");
            gpt_log_pause(gpt_log_main());

            _exit(130);
        }
    }
}
#endif
int main(int argc, char ** argv) {
    gpt_params params;
    g_params = &params;

    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_INFILL)) {
        return 1;
    }

    gpt_init();

    auto & sparams = params.sparams;

    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.logits_all) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use the 'perplexity' tool for perplexity calculations\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.embedding) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_WRN("%s: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (!params.interactive_first && (params.input_prefix.empty() && params.input_suffix.empty())) {
        LOG_ERR("\n************\n");
        LOG_ERR("%s: please use '--interactive_first' or specify '--in_prefix' and/or '--in_suffix'\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_WRN("%s: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_WRN("%s: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_INF("%s: llama backend init\n", __func__);
    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = nullptr;
    llama_context * ctx = nullptr;
    gpt_sampler * smpl = nullptr;

    g_model = &model;
    g_ctx = &ctx;
    g_smpl = &smpl;

    // load the model and apply lora adapter, if any
    LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);
    llama_init_result llama_init = llama_init_from_gpt_params(params);

    model = llama_init.model;
    ctx = llama_init.context;

    if (model == NULL) {
        LOG_ERR("%s: unable to load model\n", __func__);
        return 1;
    }

    const int n_ctx_train = llama_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);

    LOG_DBG("n_ctx: %d\n", n_ctx);

    if (n_ctx > n_ctx_train) {
        LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());
    }

    const bool add_bos = llama_add_bos_token(model);
    GGML_ASSERT(!llama_add_eos_token(model));
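
    // build the infill prompt: prefix token + prefix text, suffix token + suffix text,
    // then the middle token (prefix/suffix order is swapped for SPM-style infill models)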
    std::vector<llama_token> embd_inp;
    std::vector<llama_token> embd_end;
    std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
    std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);

    GGML_ASSERT(llama_token_prefix(model) >= 0);
    GGML_ASSERT(llama_token_suffix(model) >= 0);

    inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
    inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

    embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
    embd_end = params.spm_infill ? inp_pfx : inp_sfx;
    if (add_bos) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }
    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

    const llama_token middle_token = llama_token_middle(model);
    if (middle_token >= 0) {
        embd_inp.push_back(middle_token);
    }

    LOG_DBG("add_bos: %d\n", add_bos);
    LOG_DBG("prefix: \"%s\"\n", params.input_prefix.c_str());
    LOG_DBG("suffix: \"%s\"\n", params.input_suffix.c_str());
    LOG_DBG("tokens: %s\n", string_from(ctx, embd_inp).c_str());

    // Should not run without any tokens
    if (embd_inp.empty()) {
        embd_inp.push_back(llama_token_bos(model));
        LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
    }

    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_ERR("%s: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int) embd_inp.size();
    }

    LOG_INF("inp_pfx: %s\n", string_from(ctx, inp_pfx).c_str());
    LOG_INF("inp_sfx: %s\n", string_from(ctx, inp_sfx).c_str());

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_INF("\n");
        LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_INF("%6d -> '%s'\n", embd_inp[i], llama_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (params.n_keep > 0) {
            LOG_INF("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG("%s", llama_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG("'\n");
        }
        LOG_INF("\n");
    }
    if (params.interactive) {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset(&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        LOG_INF("%s: interactive mode on.\n", __func__);

        if (params.input_prefix_bos) {
            LOG_INF("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
        }

        if (!params.input_suffix.empty()) {
            LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
        }
    }

    smpl = gpt_sampler_init(model, sparams);

    LOG_INF("sampler seed: %u\n", gpt_sampler_get_seed(smpl));
    LOG_INF("sampler params:\n%s\n", sparams.print().c_str());
    LOG_INF("sampler chain: %s\n", gpt_sampler_print(smpl).c_str());

    LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);

    LOG("\n");
    LOG("\n##### Infill mode #####\n\n");

    if (params.interactive) {
        const char * control_message;
        if (params.multiline_input) {
            control_message = " - To return control to LLaMA, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to LLaMA.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG(" - Press Ctrl+C to interject at any time.\n");
#endif
        LOG("%s\n", control_message);

        is_interacting = params.interactive_first;
    }
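
    // generation state:
    //  - input_echo: whether freshly generated/consumed tokens should be printed
    //  - n_past:     number of tokens already evaluated by the model
    //  - n_remain:   remaining sampling budget (params.n_predict)
    //  - n_consumed: number of prompt tokens already moved into embd for evaluation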
    bool input_echo = true;

    int n_past     = 0;
    int n_remain   = params.n_predict;
    int n_consumed = 0;

    std::vector<int>   input_tokens;  g_input_tokens  = &input_tokens;
    std::vector<int>   output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss;     g_output_ss     = &output_ss;

    // the first thing we will do is to output the prompt, so set color accordingly
    console::set_display(console::prompt);

    std::vector<llama_token> embd;
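
    // main loop: evaluate any pending tokens, then either sample the next token or
    // consume remaining prompt/user input, until the budget runs out or generation ends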
    while (n_remain != 0 || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(console::error);
                LOG_WRN("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(console::reset);
            }

            // infinite text generation via context swapping
            // if we run out of context:
            // - take the n_keep first tokens from the original prompt (via n_past)
            // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches
            if (n_past + (int) embd.size() > n_ctx) {
                if (params.n_predict == -2) {
                    LOG_DBG("\n\n%s: context full and n_predict == -%d => stopping\n", __func__, params.n_predict);
                    break;
                }

                const int n_left    = n_past - params.n_keep - 1;
                const int n_discard = n_left/2;

                LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                        n_past, n_left, n_ctx, params.n_keep, n_discard);

                llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1            , params.n_keep + n_discard + 1);
                llama_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

                n_past -= n_discard;

                LOG_DBG("after swap: n_past = %d\n", n_past);

                LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());
            }

            // evaluate tokens in batches
            // embd is typically prepared beforehand to fit within a batch, but not always
            for (int i = 0; i < (int) embd.size(); i += params.n_batch) {
                int n_eval = (int) embd.size() - i;
                if (n_eval > params.n_batch) {
                    n_eval = params.n_batch;
                }

                LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());

                if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
                    LOG_ERR("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG_DBG("n_past = %d\n", n_past);
            }
        }

        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            const llama_token id = gpt_sampler_sample(smpl, ctx, -1);

            gpt_sampler_accept(smpl, id, true);

            // LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());

            embd.push_back(id);

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG_DBG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG_DBG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                gpt_sampler_accept(smpl, embd_inp[n_consumed], false);

                ++n_consumed;
                if ((int) embd.size() >= params.n_batch) {
                    break;
                }
            }
        }
        // display text
        if (input_echo) {
            for (auto id : embd) {
                const std::string token_str = llama_token_to_piece(ctx, id);
                LOG("%s", token_str.c_str());

                if (embd.size() > 1) {
                    input_tokens.push_back(id);
                } else {
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }
            }
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(console::reset);
        }
        // once all queued input has been consumed, handle end-of-text and interactive input
        if ((int) embd_inp.size() <= n_consumed) {
            // deal with eot token in infill mode
            if ((gpt_sampler_last(smpl) == llama_token_eot(model) || is_interacting) && params.interactive) {
                if (is_interacting && !params.interactive_first) {
                    // print an eot token
                    LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
                }
                LOG("\n");
                console::set_display(console::user_input);
                std::string buffer;
                std::string line;
                bool another_line = true;
                // set a new prefix via stdin
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);
                // check if we got an empty line, if so we use the old input
                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
                    params.input_prefix = buffer;
                }
                buffer.clear();
                // set a new suffix via stdin
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);
                // check if we got an empty line
                if (!buffer.empty() && !(buffer.length() == 1 && buffer[0] == '\n')) {
                    params.input_suffix = buffer;
                }
                buffer.clear();
                // done taking input, reset color
                console::set_display(console::reset);

                if (params.escape) {
                    // process escape sequences: for the initial prompt this is done in common.cpp when
                    // the params are loaded, but for interactive mode we need to do it here
                    string_process_escapes(params.input_prefix);
                    string_process_escapes(params.input_suffix);
                }

                // tokenize new prefix and suffix
                std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
                std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);

                inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
                inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

                embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
                embd_end = params.spm_infill ? inp_pfx : inp_sfx;
                if (add_bos) {
                    embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
                }
                embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

                if (middle_token >= 0) {
                    embd_inp.push_back(middle_token);
                }

                embd.clear();
                n_remain = params.n_predict;
                n_past = 0;
                n_consumed = 0;
                is_interacting = false;
            }
            // deal with end of generation tokens in interactive mode
            else if (llama_token_is_eog(model, gpt_sampler_last(smpl))) {
                LOG_DBG("found EOS token\n");

                if (params.interactive) {
                    is_interacting = true;
                    LOG("\n");
                    console::set_display(console::user_input);
                }
            }

            if (n_past > 0 && is_interacting && !params.interactive) {
                LOG_DBG("waiting for user input\n");

                if (params.input_prefix_bos) {
                    LOG_DBG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_token_bos(model));
                }

                std::string buffer;
                if (!params.input_prefix.empty()) {
                    LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    buffer += params.input_prefix;
                    LOG("%s", buffer.c_str());
                }

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(console::reset);

                // Add tokens to embd only if the input buffer is non-empty
                // Entering an empty line lets the user pass control back
                if (buffer.length() > 1) {
                    // append input suffix if any
                    if (!params.input_suffix.empty()) {
                        LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        buffer += params.input_suffix;
                        LOG("%s", params.input_suffix.c_str());
                    }

                    LOG_DBG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    const auto line_inp = ::llama_tokenize(ctx, buffer, false);
                    LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());

                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        output_tokens.push_back(token);
                        output_ss << llama_token_to_piece(ctx, token);
                    }

                    n_remain -= line_inp.size();
                    LOG_DBG("n_remain: %d\n", n_remain);
                } else {
                    LOG_DBG("empty line, passing control back\n");
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0) {
                if (is_interacting) {
                    gpt_sampler_reset(smpl);
                }
                is_interacting = false;
            }
        }

        // end of generation
        if (!embd.empty() && llama_token_is_eog(model, embd.back()) && !params.interactive) {
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!params.interactive && n_remain <= 0) {
        LOG("%s", llama_token_to_piece(ctx, llama_token_eot(model)).c_str());
    }

    LOG("\n");
    gpt_perf_print(ctx, smpl);
    write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens);

    llama_free(ctx);
    llama_free_model(model);

    gpt_sampler_free(smpl);
    llama_backend_free();

    return 0;
}