From 0e712a5acbbdd1593e5aeb86d4f6b896a11b438c Mon Sep 17 00:00:00 2001
From: Jhen-Jie Hong
Date: Wed, 13 Nov 2024 19:15:23 +0800
Subject: [PATCH] server : fix incorrect res in validate_model_chat_template
 (#10272)

* server : fix validate_model_chat_template

* server : fix chat res
---
 examples/server/server.cpp | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index a6d3a1c95..cac55007a 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -655,11 +655,16 @@ struct server_context {
     }
 
     bool validate_model_chat_template() const {
-        llama_chat_message chat[] = {{"user", "test"}};
-
-        const int res = llama_chat_apply_template(model, nullptr, chat, 1, true, nullptr, 0);
-
-        return res > 0;
+        std::vector<char> model_template(2048, 0); // longest known template is about 1200 bytes
+        std::string template_key = "tokenizer.chat_template";
+        int32_t res = llama_model_meta_val_str(model, template_key.c_str(), model_template.data(), model_template.size());
+        if (res >= 0) {
+            llama_chat_message chat[] = {{"user", "test"}};
+            std::string tmpl = std::string(model_template.data(), model_template.size());
+            int32_t chat_res = llama_chat_apply_template(model, tmpl.c_str(), chat, 1, true, nullptr, 0);
+            return chat_res > 0;
+        }
+        return false;
     }
 
     void init() {