main: replace --no-special with --special (#7534)
This also flips the default behavior: the output no longer includes control tokens by default.
parent 32a28217f4
commit d298382ad9
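In short, the old opt-out flag becomes an opt-in flag. A minimal sketch of the flipped default, using hypothetical stand-in structs for the two gpt_params fields this commit touches (no_special before, special after):

// Sketch only: hypothetical stand-ins for the real gpt_params fields,
// showing when a control token's text reaches stdout.
struct params_before { bool no_special = false; bool conversation = false; };
struct params_after  { bool special    = false; };

// Before this commit: control tokens were printed unless --no-special
// (or conversation mode) was given, i.e. printed by default.
bool prints_control_token(const params_before & p) {
    return !p.no_special && !p.conversation;
}

// After this commit: control tokens are printed only if --special is
// given, i.e. suppressed by default.
bool prints_control_token(const params_after & p) {
    return p.special;
}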
@@ -904,8 +904,8 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.interactive_specials = true;
         return true;
     }
-    if (arg == "--no-special") {
-        params.no_special = true;
+    if (arg == "--special") {
+        params.special = true;
         return true;
     }
     if (arg == "--embedding") {
@@ -1366,9 +1366,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
     printf("  -h, --help            show this help message and exit\n");
     printf("  --version             show version and build info\n");
     printf("  -i, --interactive     run in interactive mode\n");
+    printf("  --special             special tokens output enabled\n");
     printf("  --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf("  --interactive-first   run in interactive mode and wait for input right away\n");
-    printf("  --no-special          control tokens output disabled\n");
     printf("  -cnv, --conversation  run in conversation mode (does not print special tokens and suffix/prefix)\n");
     printf("  -ins, --instruct      run in instruction mode (use with Alpaca models)\n");
     printf("  -cml, --chatml        run in chatml mode (use with ChatML-compatible models)\n");
@@ -146,7 +146,7 @@ struct gpt_params {
     bool use_color            = false; // use color to distinguish generations and inputs
     bool interactive          = false; // interactive mode
     bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
-    bool no_special           = false; // disable control token output
+    bool special              = false; // enable special token output
     bool conversation         = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml               = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all     = false; // save user input and generations to prompt cache
@@ -740,16 +740,10 @@ int main(int argc, char ** argv) {
         // display text
         if (input_echo && display) {
             for (auto id : embd) {
-                const std::string token_str = llama_token_to_piece(ctx, id);
+                const std::string token_str = llama_token_to_piece(ctx, id, params.special);

                 // Console/Stream Output
-                if (!llama_token_is_control(llama_get_model(ctx), id)) {
-                    // Stream Output Token To Standard Output
-                    fprintf(stdout, "%s", token_str.c_str());
-                } else if (!params.no_special && !params.conversation) {
-                    // Stream Control Token To Standard Output Stream
-                    fprintf(stdout, "%s", token_str.c_str());
-                }
+                fprintf(stdout, "%s", token_str.c_str());

                 // Record Displayed Tokens To Log
                 // Note: Generated tokens are created one by one hence this check
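After this change the display loop no longer calls llama_token_is_control itself; the filtering moves into the bool argument of the common-library helper llama_token_to_piece(ctx, id, params.special) shown above. A minimal sketch of the resulting loop, assuming the llama.cpp common headers and that the helper renders a control token as an empty piece when special is false:

#include <cstdio>
#include <string>
#include <vector>
#include "llama.h"
#include "common.h" // declares llama_token_to_piece(ctx, token, special)

// Print each generated token; with special == false (the new default),
// control tokens come back as empty pieces and produce no output.
static void display_tokens(llama_context * ctx,
                           const std::vector<llama_token> & embd,
                           bool special /* = params.special */) {
    for (auto id : embd) {
        const std::string token_str = llama_token_to_piece(ctx, id, special);
        fprintf(stdout, "%s", token_str.c_str());
    }
}

Passing --special sets params.special and restores control-token text in the stream, the behavior that was previously the default.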