mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 02:44:36 +00:00
server : fix passing prompt as tokens (#5955)
* server: fix passing prompt as tokens

* Update examples/server/server.cpp

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
parent 8a3012a4ad
commit 0db32beaf0
examples/server/server.cpp
@@ -852,7 +852,16 @@ struct server_context {
         // infill
         slot.params.input_prefix = json_value(data, "input_prefix", default_params.input_prefix);
         slot.params.input_suffix = json_value(data, "input_suffix", default_params.input_suffix);
-        slot.prompt = json_value(data, "prompt", std::string(""));
+
+        // get prompt
+        {
+            const auto & prompt = data.find("prompt");
+            if (prompt == data.end()) {
+                slot.prompt = "";
+            } else {
+                slot.prompt = *prompt;
+            }
+        }
 
         // penalize user-provided tokens
         {
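For context on why the change matters: the old call json_value(data, "prompt", std::string("")) typed the prompt as a std::string, so a prompt supplied as an array of token ids could not be carried through; the patched code copies the raw JSON value, which preserves both forms. Below is a minimal standalone sketch of that logic, assuming the nlohmann::json library used by examples/server; the request body and token ids are made up for illustration, and this is not the server code itself.

// Standalone sketch, not the server code: mimics the patched lookup with
// nlohmann::json. The request body below is a hypothetical example of a
// prompt passed as token ids instead of a string.
#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main() {
    json data = json::parse(R"({"prompt": [1, 15043, 2787]})");

    // Keep the raw JSON value so the prompt stays usable whether it is
    // a string or an array of token ids.
    json prompt;
    const auto & it = data.find("prompt");
    if (it == data.end()) {
        prompt = "";   // no prompt supplied
    } else {
        prompt = *it;  // string or token array, copied as-is
    }

    std::cout << prompt.dump() << std::endl; // prints [1,15043,2787]
    return 0;
}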