mirror of https://github.com/ggerganov/llama.cpp.git, synced 2025-01-12 03:31:46 +00:00

tokenize : escape the prompt (#11058)

* tokenize : escape the prompt
* tokenize : update help

This commit is contained in:
parent ae2f606bb5
commit 3e6e7a6bc2
@@ -31,6 +31,7 @@ static void print_usage_information(const char * argv0) {
     printf("    -p PROMPT, --prompt PROMPT           read prompt from the argument.\n");
     printf("    --stdin                              read prompt from standard input.\n");
     printf("    --no-bos                             do not ever add a BOS token to the prompt, even if normally the model uses a BOS token.\n");
+    printf("    --no-escape                          do not escape input (such as \\n, \\t, etc.).\n");
     printf("    --no-parse-special                   do not parse control tokens.\n");
     printf("    --log-disable                        disable logs. Makes stderr quiet when loading the model.\n");
     printf("    --show-count                         print the total number of tokens.\n");
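For readers unfamiliar with the option, "escaping" here refers to how the prompt string from the command line is interpreted before tokenization: with this change, sequences such as "\n" and "\t" are converted to the actual control characters by default, while --no-escape keeps the characters exactly as typed. A minimal, self-contained illustration of the difference (the strings and the invocation in the comment are illustrative, not taken from the patch):

    // Illustration only: what the program receives vs. what gets tokenized by default.
    // Example invocation (binary name assumed): llama-tokenize -m model.gguf -p "Hello\nWorld"
    #include <cstdio>

    int main() {
        // The shell passes the backslash and the 'n' through literally:
        const char * raw_prompt     = "Hello\\nWorld"; // 12 characters: '\' followed by 'n'
        // After escape processing (the new default) the tokenizer sees a real newline:
        const char * escaped_prompt = "Hello\nWorld";  // 11 characters: one '\n' byte
        printf("raw:     %s\n", raw_prompt);
        printf("escaped: %s\n", escaped_prompt);
        return 0;
    }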
@@ -198,6 +199,7 @@ int main(int raw_argc, char ** raw_argv) {
     // variables where to put any arguments we see.
     bool printing_ids = false;
     bool no_bos = false;
+    bool no_escape = false;
     bool no_parse_special = false;
     bool disable_logging = false;
     bool show_token_count = false;
@@ -233,6 +235,9 @@ int main(int raw_argc, char ** raw_argv) {
         else if (arg == "--no-bos") {
             no_bos = true;
         }
+        else if (arg == "--no-escape") {
+            no_escape = true;
+        }
         else if (arg == "--no-parse-special") {
             no_parse_special = true;
         }
@@ -363,6 +368,11 @@ int main(int raw_argc, char ** raw_argv) {
     const bool model_wants_add_bos = llama_add_bos_token(model);
     const bool add_bos = model_wants_add_bos && !no_bos;
     const bool parse_special = !no_parse_special;
+    const bool escape = !no_escape;
+
+    if (escape) {
+        string_process_escapes(prompt);
+    }
 
     std::vector<llama_token> tokens;
     tokens = common_tokenize(model, prompt, add_bos, parse_special);
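The new code path relies on the existing string_process_escapes helper from the project's common library; its implementation is not part of this diff. As a rough sketch only, assuming the helper handles the escapes named in the help text (the function below is a hypothetical stand-in, not the actual common code), in-place escape rewriting could look like this:

    #include <cstdio>
    #include <string>

    // Hedged sketch: rewrite C-style escape sequences ("\n", "\t", "\\") in place.
    // This is an illustration of the idea, not the real string_process_escapes.
    static void process_escapes_sketch(std::string & input) {
        std::size_t out = 0;
        for (std::size_t in = 0; in < input.size(); ++in) {
            if (input[in] == '\\' && in + 1 < input.size()) {
                switch (input[++in]) {
                    case 'n':  input[out++] = '\n'; break;
                    case 't':  input[out++] = '\t'; break;
                    case '\\': input[out++] = '\\'; break;
                    default:   // unknown escape: keep both characters untouched
                        input[out++] = '\\';
                        input[out++] = input[in];
                        break;
                }
            } else {
                input[out++] = input[in];
            }
        }
        input.resize(out); // drop the slack left by collapsed escape pairs
    }

    int main() {
        std::string prompt = "Hello\\nWorld";   // as received from the command line
        process_escapes_sketch(prompt);
        printf("%s\n", prompt.c_str());         // prints "Hello" and "World" on two lines
        return 0;
    }

When --no-escape is passed, the patch skips this processing step entirely, so the two characters '\' and 'n' are tokenized as-is.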