Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
server : minor sync

parent 9740824ba5
commit 325d1793f7
@@ -748,6 +748,7 @@ struct llama_server_context
                                  const stop_type type, llama_client_slot &slot)
     {
         size_t stop_pos = std::string::npos;
+
         for (const std::string &word : slot.params.antiprompt)
         {
             size_t pos;
@@ -774,6 +775,7 @@ struct llama_server_context

             }
         }
+
         return stop_pos;
     }

@@ -798,8 +800,7 @@ struct llama_server_context
                 pos = std::min(slot.sent_count, slot.generated_text.size());
             } else {
                 is_stop_full = false;
-                stop_pos = find_stopping_strings(str_test, token_str.size(),
-                                                 STOP_PARTIAL, slot);
+                stop_pos = find_stopping_strings(str_test, token_str.size(), STOP_PARTIAL, slot);
             }

             // check if there is any token to predict
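For context on the two call sites above: STOP_FULL means one of the configured stop strings already occurs in the newly generated text, while STOP_PARTIAL means the text merely ends with a prefix of a stop string, so the streaming code holds that tail back until later tokens resolve it. The following is a minimal, self-contained sketch of that distinction; find_partial_stop and the sample strings are illustrative stand-ins, not the server's actual helpers.

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Illustrative helper: returns the position where a suffix of `text` matches a
// prefix of `word`, i.e. the stop word could still be completed by future tokens.
static size_t find_partial_stop(const std::string & word, const std::string & text) {
    if (word.empty() || text.empty()) {
        return std::string::npos;
    }
    for (size_t n = std::min(word.size() - 1, text.size()); n > 0; --n) {
        if (text.compare(text.size() - n, n, word, 0, n) == 0) {
            return text.size() - n;
        }
    }
    return std::string::npos;
}

int main() {
    const std::vector<std::string> antiprompt = { "User:", "</s>" };
    const std::string generated = "Sure, here is the answer. Us";

    for (const std::string & word : antiprompt) {
        const size_t full    = generated.find(word);               // stop word fully present?
        const size_t partial = find_partial_stop(word, generated); // text ends with its prefix?

        std::cout << word << ": full="
                  << (full    == std::string::npos ? -1L : (long) full)
                  << " partial="
                  << (partial == std::string::npos ? -1L : (long) partial)
                  << "\n";
    }
    return 0;
}

Here "User:" reports a partial match at the trailing "Us", which is exactly the case where a streaming server would withhold those characters until the next token either completes or rules out the stop string.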
@@ -2286,8 +2287,7 @@ int main(int argc, char **argv)

             const json data = format_final_response(llama, slot, completion_text, probs);
             slot_print_timings(slot);
-            res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace),
-                            "application/json");
+            res.set_content(data.dump(-1, ' ', false, json::error_handler_t::replace), "application/json");
         }
         else
         {
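On the non-streaming branch above, the response body comes from nlohmann::json's dump(); the replace error handler matters because generated text can legitimately end in the middle of a multi-byte UTF-8 sequence, and the default strict handler would throw. A small sketch of that behaviour, assuming the vendored json.hpp single header is available; the truncated string below is made up for illustration.

#include <iostream>
#include <string>

#include "json.hpp" // nlohmann::json single header, as vendored by the server example

using json = nlohmann::json;

int main() {
    // simulate a generation cut off in the middle of a multi-byte UTF-8
    // sequence ("é" is 0xC3 0xA9; only the first byte is kept here)
    const std::string truncated = std::string("caf") + '\xC3';

    const json data = {
        { "content", truncated },
        { "stop",    true      },
    };

    // -1      : compact output, no indentation
    // ' '     : indent character (unused when indent is -1)
    // false   : do not force-escape non-ASCII characters
    // replace : substitute invalid UTF-8 with U+FFFD instead of throwing
    const std::string body = data.dump(-1, ' ', false, json::error_handler_t::replace);

    std::cout << body << "\n"; // the invalid byte comes out as the replacement character U+FFFD
    return 0;
}

With the default error handler the same dump() call would raise a type_error, so the completion endpoint would fail on otherwise valid output; replacing the bad byte keeps the response well-formed JSON.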
@@ -2342,9 +2342,11 @@ int main(int argc, char **argv)
                             "data: " +
                             data.dump(-1, ' ', false, json::error_handler_t::replace) +
                             "\n\n";
+
                         LOG_VERBOSE("data stream", {
                             { "to_send", str }
                         });
+
                         if (!sink.write(str.data(), str.size()))
                         {
                             slot->release();
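For readers unfamiliar with the streaming branch touched above: each chunk is framed as one server-sent event ("data: " plus the JSON payload plus a blank line), and sink.write returning false is the signal that the client stopped reading, which is why the slot is released at that point. Below is a minimal sketch of that pattern; the Sink struct and the slot_release callback are stand-ins for httplib::DataSink and the server's slot handling, not the real API.

#include <cstddef>
#include <functional>
#include <iostream>
#include <string>

struct Sink {
    // stand-in for httplib::DataSink::write; returns false once the client is gone
    std::function<bool(const char *, size_t)> write;
};

static bool send_sse_chunk(Sink & sink, const std::string & json_payload,
                           const std::function<void()> & slot_release) {
    // each streamed chunk is one SSE event: "data: " + payload + blank line
    const std::string str = "data: " + json_payload + "\n\n";

    if (!sink.write(str.data(), str.size())) {
        // the client disconnected: free the slot so another request can use it
        slot_release();
        return false;
    }
    return true;
}

int main() {
    Sink sink;
    sink.write = [](const char * data, size_t len) {
        std::cout.write(data, (std::streamsize) len);
        return true;
    };

    send_sse_chunk(sink, R"({"content":"Hello","stop":false})",
                   [] { std::cout << "slot released\n"; });
    return 0;
}

The early return on a failed write is what stops the server from generating tokens for a connection nobody is listening to.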