From c36e81da62ebfe09a768201cc44fa8d712dd00ed Mon Sep 17 00:00:00 2001
From: yangli2
Date: Thu, 15 Jun 2023 11:05:53 -0700
Subject: [PATCH] examples : add chat-vicuna.sh (#1854)

Co-authored-by: Yang Li
---
 examples/chat-vicuna.sh | 41 +++++++++++++++++++++++++++++++++++++++++
 llama.h                 |  6 +++---
 2 files changed, 44 insertions(+), 3 deletions(-)
 create mode 100755 examples/chat-vicuna.sh

diff --git a/examples/chat-vicuna.sh b/examples/chat-vicuna.sh
new file mode 100755
index 000000000..8c7b7bef4
--- /dev/null
+++ b/examples/chat-vicuna.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+
+set -e
+
+cd "$(dirname "$0")/.." || exit
+
+MODEL="${MODEL:-./models/ggml-vic13b-uncensored-q5_0.bin}"
+PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}"
+USER_NAME="### Human"
+AI_NAME="### Assistant"
+
+# Adjust to the number of CPU cores you want to use.
+N_THREAD="${N_THREAD:-8}"
+# Number of tokens to predict (made it larger than default because we want a long interaction)
+N_PREDICTS="${N_PREDICTS:-2048}"
+
+# Note: you can also override the generation options by specifying them on the command line:
+# For example, override the context size by doing: ./examples/chat-vicuna.sh --ctx_size 1024
+GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}"
+
+DATE_TIME=$(date +%H:%M)
+DATE_YEAR=$(date +%Y)
+
+PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt)
+
+sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
+    -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \
+    -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \
+    -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \
+    "$PROMPT_TEMPLATE" > "$PROMPT_FILE"
+
+# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
+./bin/main $GEN_OPTIONS \
+    --model "$MODEL" \
+    --threads "$N_THREAD" \
+    --n_predict "$N_PREDICTS" \
+    --color --interactive \
+    --file "$PROMPT_FILE" \
+    --reverse-prompt "### Human:" \
+    --in-prefix ' ' \
+    "$@"
diff --git a/llama.h b/llama.h
index 64292265c..1241ba6c0 100644
--- a/llama.h
+++ b/llama.h
@@ -244,9 +244,9 @@ extern "C" {
     LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);
 
     // Special tokens
-    LLAMA_API llama_token llama_token_bos();
-    LLAMA_API llama_token llama_token_eos();
-    LLAMA_API llama_token llama_token_nl();
+    LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
+    LLAMA_API llama_token llama_token_eos(); // end-of-sentence
+    LLAMA_API llama_token llama_token_nl();  // new-line
 
     // Sampling functions
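
A minimal usage sketch for the new script, assuming main has already been built
at ./bin/main (the path the script hard-codes) and that the default Vicuna model
file actually exists under ./models — neither is checked here. MODEL,
PROMPT_TEMPLATE, N_THREAD, N_PREDICTS, and GEN_OPTIONS can all be overridden
from the environment, and any extra command-line arguments are forwarded to
main through "$@":

    # Sketch only: the model path below is just the script's default, not a
    # file this example verifies. Trailing flags pass straight through to main.
    MODEL=./models/ggml-vic13b-uncensored-q5_0.bin \
    N_THREAD=4 \
    ./examples/chat-vicuna.sh --ctx_size 1024

Because GEN_OPTIONS is deliberately expanded unquoted (hence the shellcheck
disable), overriding it replaces the entire set of generation flags rather
than appending to them.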
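
The sed pipeline fills four [[...]] placeholders in the prompt template. The
contents of the default ./prompts/chat.txt are not part of this patch, so the
following is only a hypothetical minimal template illustrating the placeholder
format the script expects:

    A chat between a curious human ([[USER_NAME]]) and an AI assistant
    ([[AI_NAME]]). The current time is [[DATE_TIME]], [[DATE_YEAR]].

    [[USER_NAME]]: Hello!
    [[AI_NAME]]: Hello! How may I help you today?
    [[USER_NAME]]:

After substitution, each [[USER_NAME]] line begins with "### Human:", which is
exactly the --reverse-prompt string, so generation pauses and returns control
to the user whenever the model starts to emit the next human turn.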