#pragma once

#include "common.h"
#include "log.h"
#include "llama.h"

#ifndef NDEBUG
// crash the server in debug mode, otherwise send an http 500 error
#define CPPHTTPLIB_NO_EXCEPTIONS 1
#endif
// increase max payload length to allow use of larger context size
#define CPPHTTPLIB_FORM_URL_ENCODED_PAYLOAD_MAX_LENGTH 1048576
#include "httplib.h"

// Change JSON_ASSERT from assert() to GGML_ASSERT:
#define JSON_ASSERT GGML_ASSERT
#include "json.hpp"

#include <random>
#include <sstream>
#include <string>
#include <vector>
#include <memory>

#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"

using json = nlohmann::ordered_json;

#define SLT_INF(slot, fmt, ...) LOG_INF("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_WRN(slot, fmt, ...) LOG_WRN("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_ERR(slot, fmt, ...) LOG_ERR("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)
#define SLT_DBG(slot, fmt, ...) LOG_DBG("slot %12.*s: id %2d | task %d | " fmt, 12, __func__, (slot).id, (slot).id_task, __VA_ARGS__)

#define SRV_INF(fmt, ...) LOG_INF("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_WRN(fmt, ...) LOG_WRN("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_ERR(fmt, ...) LOG_ERR("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define SRV_DBG(fmt, ...) LOG_DBG("srv %12.*s: " fmt, 12, __func__, __VA_ARGS__)

#define QUE_INF(fmt, ...) LOG_INF("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_WRN(fmt, ...) LOG_WRN("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_ERR(fmt, ...) LOG_ERR("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
#define QUE_DBG(fmt, ...) LOG_DBG("que %12.*s: " fmt, 12, __func__, __VA_ARGS__)
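// Example (illustrative sketch): the macros prefix each log line with the first
// 12 characters of the calling function plus the slot/task ids. The function
// name and values shown in the expected output are hypothetical.
//
//   SLT_INF(slot, "prompt tokenized, n_tokens = %d\n", n_prompt_tokens);
//   // -> "slot launch_slot_: id  0 | task 7 | prompt tokenized, n_tokens = 42"
//
//   SRV_INF("%s", "server is listening\n");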
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    // Fallback null to default value
    if (body.contains(key) && !body.at(key).is_null()) {
        try {
            return body.at(key);
        } catch (NLOHMANN_JSON_NAMESPACE::detail::type_error const &) {
            LOG_WRN("Wrong type supplied for parameter '%s'. Expected '%s', using default value\n", key.c_str(), json(default_value).type_name());
            return default_value;
        }
    } else {
        return default_value;
    }
}
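// Example (illustrative sketch): reading optional request fields with typed
// defaults. The `body` below is a hypothetical request payload.
//
//   json body = json::parse(R"({"n_predict": 128, "temperature": null})");
//   int         n_predict = json_value(body, "n_predict",   -1);                   // 128
//   float       temp      = json_value(body, "temperature", 0.8f);                 // 0.8f (null falls back)
//   std::string model     = json_value(body, "model", std::string("default"));     // "default" (missing key)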
//
// tokenizer and input processing utils
//

static bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

// does the array contain BOTH numbers & strings?
static bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}
/**
 * this handles 2 cases:
 * - only string, example: "string"
 * - mixed string and tokens, example: [12, 34, "string", 56, 78]
 */
static llama_tokens tokenize_mixed(const llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
    // If `add_special` is true, we only add BOS when json_prompt is a string,
    // or when the first element of the json_prompt array is a string.
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens toks;
                if (first) {
                    toks = common_tokenize(ctx, s, add_special, parse_special);
                    first = false;
                } else {
                    toks = common_tokenize(ctx, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), toks.begin(), toks.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(ctx, s, add_special, parse_special);
    }

    return prompt_tokens;
}
/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * and multiple prompts (multi-tasks):
 * - "prompt": ["string1", "string2"]
 * - "prompt": ["string1", [12, 34, 56]]
 * - "prompt": [[12, 34, "string", 56, 78], [12, 34, 56]]
 */
static std::vector<llama_tokens> tokenize_input_prompts(llama_context * ctx, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<llama_tokens> result;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed
        result.push_back(tokenize_mixed(ctx, json_prompt, add_special, parse_special));
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        result.push_back(json_prompt.get<llama_tokens>());
    } else if (json_prompt.is_array()) {
        // array of prompts
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            if (p.is_string() || json_is_array_of_mixed_numbers_strings(p)) {
                result.push_back(tokenize_mixed(ctx, p, add_special, parse_special));
            } else if (json_is_array_of_numbers(p)) {
                // array of tokens
                result.push_back(p.get<llama_tokens>());
            } else {
                throw std::runtime_error("element of \"prompt\" must be a string, a list of tokens, or a list of mixed strings & tokens");
            }
        }
    } else {
        throw std::runtime_error("\"prompt\" must be a string, a list of tokens, a list of mixed strings & tokens, or a list of prompts");
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}
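// Example (illustrative sketch, assuming an initialized llama_context * ctx):
// the "prompt" shapes accepted above and how many tokenized prompts they yield.
//
//   auto one = tokenize_input_prompts(ctx, json("hello"),                           true, true); // 1 prompt
//   auto two = tokenize_input_prompts(ctx, json::parse(R"(["hi", [12, 34, 56]])"),  true, true); // 2 prompts
//   auto mix = tokenize_input_prompts(ctx, json::parse(R"([12, 34, "world", 56])"), true, true); // 1 prompt (mixed)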
//
// template utils
//

// format rerank task: [BOS]query[EOS][SEP]doc[EOS]
static llama_tokens format_rerank(const struct llama_model * model, const llama_tokens & query, const llama_tokens & doc) {
    llama_tokens result;
    result.reserve(doc.size() + query.size() + 4);
    result.push_back(llama_token_bos(model));
    result.insert(result.end(), query.begin(), query.end());
    result.push_back(llama_token_eos(model));
    result.push_back(llama_token_sep(model));
    result.insert(result.end(), doc.begin(), doc.end());
    result.push_back(llama_token_eos(model));
    return result;
}
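// Example (illustrative sketch, assuming initialized `ctx` and `model`):
//
//   llama_tokens q = common_tokenize(ctx, "what is panda?",                     false, true);
//   llama_tokens d = common_tokenize(ctx, "The giant panda is a bear species.", false, true);
//   llama_tokens rerank_input = format_rerank(model, q, d);
//   // rerank_input = [BOS] q... [EOS] [SEP] d... [EOS]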
// format infill task
static llama_tokens format_infill(
        const llama_context * ctx,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // TODO: optimize this block by reducing memory allocations and movement

    // use FIM repo-level pattern:
    // ref: https://arxiv.org/pdf/2409.12186
    //
    // [FIM_REP]myproject
    // [FIM_SEP]filename0
    // extra chunk 0
    // [FIM_SEP]filename1
    // extra chunk 1
    // ...
    // [FIM_SEP]filename
    // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto model = llama_get_model(ctx);
    auto tokens_prefix = tokenize_mixed(ctx, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(ctx, input_suffix, false, false);

    if (llama_token_fim_rep(model) != LLAMA_TOKEN_NULL) {
        // TODO: make project name an input
        static const auto k_fim_repo = common_tokenize(ctx, "myproject\n", false, false);

        extra_tokens.push_back(llama_token_fim_rep(model));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // { "text": string, "filename": string }
        const std::string text = json_value(chunk, "text", std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(ctx, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // chunk separator in binary form to avoid confusing the AI
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(ctx, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(ctx, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_token_fim_sep(model) != LLAMA_TOKEN_NULL) {
        // TODO: current filename
        static const auto k_fim_file = common_tokenize(ctx, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_token_fim_sep(model));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(), 3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_token_fim_pre(model));
    tokens_prefix.insert(tokens_prefix.end(), tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_token_fim_suf(model));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_add_bos_token(model)) {
        embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before the FIM prefix
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_token_fim_mid(model));

    return embd_inp;
}
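// Example (illustrative sketch): a hypothetical infill-style call; the JSON
// values and the in-scope variables (n_batch, n_predict, n_ctx, spm_infill,
// tokens_prompt) are assumptions for illustration only.
//
//   json prefix = "def add(a, b):\n    ";
//   json suffix = "\n\nprint(add(1, 2))\n";
//   json extra  = json::parse(R"([{"filename": "util.py", "text": "def sub(a, b):\n    return a - b\n"}])");
//   llama_tokens inp = format_infill(ctx, prefix, suffix, extra,
//                                    n_batch, n_predict, n_ctx, spm_infill, tokens_prompt);
//   // inp = [extra chunks][BOS][FIM_PRE]prefix prompt[FIM_SUF]suffix[FIM_MID]
//   // (prefix/suffix blocks are swapped for SPM-style infill models)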
// Format given chat. If tmpl is empty, we take the template from model metadata
inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
    std::vector<common_chat_msg> chat;

    for (size_t i = 0; i < messages.size(); ++i) {
        const auto & curr_msg = messages[i];

        std::string role = json_value(curr_msg, "role", std::string(""));

        std::string content;
        if (curr_msg.contains("content")) {
            if (curr_msg["content"].is_string()) {
                content = curr_msg["content"].get<std::string>();
            } else if (curr_msg["content"].is_array()) {
                for (const auto & part : curr_msg["content"]) {
                    if (part.contains("text")) {
                        content += "\n" + part["text"].get<std::string>();
                    }
                }
            } else {
                throw std::runtime_error("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
            }
        } else {
            throw std::runtime_error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
        }

        chat.push_back({role, content});
    }

    const auto formatted_chat = common_chat_apply_template(model, tmpl, chat, true);
    LOG_DBG("formatted_chat: '%s'\n", formatted_chat.c_str());

    return formatted_chat;
}
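// Example (illustrative sketch): an OAI-style "messages" array; array-valued
// "content" parts are concatenated with leading newlines before templating.
//
//   std::vector<json> messages = {
//       json{{"role", "system"}, {"content", "You are a helpful assistant."}},
//       json{{"role", "user"},   {"content", json::array({ json{{"type", "text"}, {"text", "Hello!"}} })}},
//   };
//   std::string prompt = format_chat(model, "", messages); // "" -> use the model's built-in template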
static std::string llama_get_chat_template(const struct llama_model * model) {
    std::string template_key = "tokenizer.chat_template";
    // call with NULL buffer to get the total size of the string
    int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
    if (res < 2) {
        return "";
    } else {
        // allocate one extra byte for the null terminator written by snprintf,
        // then drop it again when constructing the std::string below
        std::vector<char> model_template(res + 1, 0);
        llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
        return std::string(model_template.data(), model_template.size() - 1);
    }
}
//
// base64 utils (TODO: move to common in the future)
//

static const std::string base64_chars =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "abcdefghijklmnopqrstuvwxyz"
    "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline std::vector<uint8_t> base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    std::vector<uint8_t> ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}
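// Example (illustrative sketch): decoding a base64 payload, e.g. the part of a
// data URI ("data:image/png;base64,...") after the comma.
//
//   const std::string encoded = "aGVsbG8=";              // base64 of "hello"
//   std::vector<uint8_t> bytes = base64_decode(encoded); // {'h','e','l','l','o'}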
//
// random string / id
//

static std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

static std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}
//
// other common utils
//

static bool ends_with(const std::string & str, const std::string & suffix) {
    return str.size() >= suffix.size() && 0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
}

static size_t find_partial_stop_string(const std::string & stop, const std::string & text) {
    if (!text.empty() && !stop.empty()) {
        const char text_last_char = text.back();
        for (int64_t char_index = stop.size() - 1; char_index >= 0; char_index--) {
            if (stop[char_index] == text_last_char) {
                const std::string current_partial = stop.substr(0, char_index + 1);
                if (ends_with(text, current_partial)) {
                    return text.size() - char_index - 1;
                }
            }
        }
    }

    return std::string::npos;
}

// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(ctx, *begin);
    }

    return ret;
}
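// Example (illustrative sketch): find_partial_stop_string reports where a
// *partial* stop sequence begins at the end of the generated text, so the
// caller can hold those characters back until the next token resolves it.
//
//   find_partial_stop_string("</s>", "Hello</") == 5;                  // "</" may still grow into "</s>"
//   find_partial_stop_string("</s>", "Hello")   == std::string::npos;  // no partial match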
// format incomplete utf-8 multibyte character for output
static std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == -1 ? "" : common_token_to_piece(ctx, token);

    // if the size is 1 and the first bit is 1, it is a partial character
    // (size > 1 means it is already a known token)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

static bool server_sent_event(httplib::DataSink & sink, const char * event, const json & data) {
    const std::string str =
        std::string(event) + ": " +
        data.dump(-1, ' ', false, json::error_handler_t::replace) +
        "\n\n"; // required by RFC 8895 - A message is terminated by a blank line (two line terminators in a row).

    LOG_DBG("data stream, to_send: %s", str.c_str());

    return sink.write(str.c_str(), str.size());
}
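// Example (illustrative sketch): each call writes one SSE frame to the HTTP sink.
//
//   server_sent_event(sink, "data", json{{"content", "Hel"}});
//   // writes: data: {"content":"Hel"}\n\n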
//
// OAI utils
//

static json oaicompat_completion_params_parse(
    const struct llama_model * model,
    const json & body, /* openai api json semantics */
    const std::string & chat_template) {
    json llama_params;

    // Apply chat template to the list of messages
    llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            llama_params["json_schema"] = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            json json_schema = json_value(response_format, "json_schema", json::object());
            llama_params["json_schema"] = json_value(json_schema, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::runtime_error("response_format type must be one of \"text\", \"json_object\" or \"json_schema\", but got: " + response_type);
        }
    }

    // Handle "n" field
    int n_choices = json_value(body, "n", 1);
    if (n_choices != 1) {
        throw std::runtime_error("Only one completion choice is allowed");
    }

    // Handle "logprobs" field
    // TODO: The response format of this option is not yet OAI-compatible, but it seems like no one is really using it; we may need to fix it in the future
    if (json_value(body, "logprobs", false)) {
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::runtime_error("top_logprobs requires logprobs to be set to true");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "tools", "tool_choice" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    // This allows the user to use llama.cpp-specific params like "mirostat", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}
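// Example (illustrative sketch): a minimal OAI-style chat request and some of
// the fields the parser derives from it; unknown keys (e.g. "mirostat") are
// passed through to llama_params.
//
//   json body = json::parse(R"({
//       "messages": [{"role": "user", "content": "Hi"}],
//       "stop": "###",
//       "response_format": {"type": "json_object"},
//       "mirostat": 2
//   })");
//   json params = oaicompat_completion_params_parse(model, body, chat_template);
//   // params["prompt"]      <- templated chat
//   // params["stop"]        == ["###"]
//   // params["json_schema"] == {}   (json_object with no "schema" given)
//   // params["mirostat"]    == 2    (copied through)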
static json format_embeddings_response_oaicompat(const json & request, const json & embeddings) {
    json data = json::array();
    int i = 0;
    for (const auto & elem : embeddings) {
        data.push_back(json{
            {"embedding", json_value(elem, "embedding", json::array())},
            {"index", i++},
            {"object", "embedding"}
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json { // TODO: fill
            {"prompt_tokens", 0},
            {"total_tokens", 0}
        }},
        {"data", data}
    };

    return res;
}
static json format_response_rerank(const json & request, const json & ranks) {
    json data = json::array();
    int i = 0;
    for (const auto & rank : ranks) {
        data.push_back(json{
            {"index", i++},
            {"relevance_score", json_value(rank, "score", 0.0)},
        });
    }

    json res = json {
        {"model", json_value(request, "model", std::string(DEFAULT_OAICOMPAT_MODEL))},
        {"object", "list"},
        {"usage", json { // TODO: fill
            {"prompt_tokens", 0},
            {"total_tokens", 0}
        }},
        {"results", data}
    };

    return res;
}
static bool is_valid_utf8(const std::string & str) {
    const unsigned char* bytes = reinterpret_cast<const unsigned char*>(str.data());
    const unsigned char* end = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}
static json format_tokenizer_response(const json & tokens) {
    return json {
        {"tokens", tokens}
    };
}

static json format_detokenized_response(const std::string & content) {
    return json {
        {"content", content}
    };
}

static json format_logit_bias(const std::vector<llama_logit_bias> & logit_bias) {
    json data = json::array();
    for (const auto & lb : logit_bias) {
        data.push_back(json{
            {"bias", lb.bias},
            {"token", lb.token},
        });
    }
    return data;
}

static std::string safe_json_to_str(json data) {
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}