Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-14 06:49:54 +00:00)
llama : fix llama_chat_format_single for mistral (#8657)

* fix `llama_chat_format_single` for mistral
* fix typo
* use printf
parent 79167d9e49
commit 96952e7181
common/common.cpp:

@@ -2723,7 +2723,7 @@ std::string llama_chat_format_single(const struct llama_model * model,
                                      const llama_chat_msg & new_msg,
                                      bool add_ass) {
     std::ostringstream ss;
-    auto fmt_past_msg = llama_chat_apply_template(model, tmpl, past_msg, false);
+    auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
     std::vector<llama_chat_msg> chat_new(past_msg);
     // if the past_msg ends with a newline, we must preserve it in the formatted version
     if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
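For context, llama_chat_format_single computes a per-message delta: it formats the history without the new message, then with it, and returns only the suffix. The change above guards the empty-history case so the template is never applied to an empty message list. Below is a minimal sketch of the full post-fix body; the push_back of new_msg, the second apply_template call, and the substr-based delta are not visible in the hunk and are reconstructed from the surrounding context, so treat them as assumptions:

// Sketch of the surrounding function after this change.
std::string llama_chat_format_single(const struct llama_model * model,
        const std::string & tmpl,
        const std::vector<llama_chat_msg> & past_msg,
        const llama_chat_msg & new_msg,
        bool add_ass) {
    std::ostringstream ss;
    // key change: never ask the template to format an empty history
    auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
    std::vector<llama_chat_msg> chat_new(past_msg);
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    }
    // format again with the new message appended, then emit only the suffix
    chat_new.push_back(new_msg);
    auto fmt_new_msg = llama_chat_apply_template(model, tmpl, chat_new, add_ass);
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}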
examples/main/main.cpp:

@@ -124,6 +124,7 @@ static std::string chat_add_and_format(struct llama_model * model, std::vector<l
     auto formatted = llama_chat_format_single(
             model, g_params->chat_template, chat_msgs, new_msg, role == "user");
     chat_msgs.push_back({role, content});
+    LOG("formatted: %s\n", formatted.c_str());
     return formatted;
 }
 
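For reference, a hedged sketch of how the interactive chat loop might drive this helper; the role/content parameters and the variable names below (prompt, assistant_reply) are illustrative stand-ins, not lines from main.cpp:

// Illustrative only: assumes chat_add_and_format(model, chat_msgs, role, content).
std::vector<llama_chat_msg> chat_msgs;

// first user turn: chat_msgs is empty, which is exactly the case the
// past_msg.empty() guard above now handles for mistral/llama2 templates
std::string prompt = chat_add_and_format(model, chat_msgs, "user", "How are you?");
// ... tokenize `prompt`, run generation ...
std::string assistant_reply = "I am fine, thanks.";
// record the reply so the next turn's delta is computed against the full history
chat_add_and_format(model, chat_msgs, "assistant", assistant_reply);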
tests/test-chat-template.cpp:

@@ -1,4 +1,3 @@
-#include <iostream>
 #include <string>
 #include <vector>
 #include <sstream>
@@ -133,13 +132,31 @@ int main(void) {
         );
         formatted_chat.resize(res);
         std::string output(formatted_chat.data(), formatted_chat.size());
-        std::cout << output << "\n-------------------------\n";
+        printf("%s\n", output.c_str());
+        printf("-------------------------\n");
         assert(output == expected);
     }
 
-    // test llama_chat_format_single
-    std::cout << "\n\n=== llama_chat_format_single ===\n\n";
+
+    // test llama_chat_format_single for system message
+    printf("\n\n=== llama_chat_format_single (system message) ===\n\n");
     std::vector<llama_chat_msg> chat2;
+    llama_chat_msg sys_msg{"system", "You are a helpful assistant"};
+
+    auto fmt_sys = [&](std::string tmpl) {
+        auto output = llama_chat_format_single(nullptr, tmpl, chat2, sys_msg, false);
+        printf("fmt_sys(%s) : %s\n", tmpl.c_str(), output.c_str());
+        printf("-------------------------\n", output.c_str());
+        return output;
+    };
+    assert(fmt_sys("chatml") == "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n");
+    assert(fmt_sys("llama2") == "[INST] You are a helpful assistant\n");
+    assert(fmt_sys("gemma") == ""); // for gemma, system message is merged with user message
+    assert(fmt_sys("llama3") == "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|>");
+
+
+    // test llama_chat_format_single for user message
+    printf("\n\n=== llama_chat_format_single (user message) ===\n\n");
     chat2.push_back({"system", "You are a helpful assistant"});
     chat2.push_back({"user", "Hello"});
     chat2.push_back({"assistant", "I am assistant"});
@@ -147,7 +164,8 @@ int main(void) {
 
     auto fmt_single = [&](std::string tmpl) {
         auto output = llama_chat_format_single(nullptr, tmpl, chat2, new_msg, true);
-        std::cout << "fmt_single(" << tmpl << ")\n" << output << "\n-------------------------\n";
+        printf("fmt_single(%s) : %s\n", tmpl.c_str(), output.c_str());
+        printf("-------------------------\n", output.c_str());
         return output;
     };
     assert(fmt_single("chatml") == "\n<|im_start|>user\nHow are you<|im_end|>\n<|im_start|>assistant\n");
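The new system-message test exercises exactly the path this commit fixes: with an empty history, the delta for a llama2/mistral-style template must be the fully formatted system message. A minimal standalone check mirroring the assertions above (it assumes the same headers and llama_chat_msg type the test file already uses):

// Formatting the first (system) message against an empty history.
// Before this commit, fmt_past_msg came from applying the template to an
// empty message list; now it is forced to "".
std::vector<llama_chat_msg> history; // empty: first turn of a conversation
llama_chat_msg sys_msg{"system", "You are a helpful assistant"};
auto delta = llama_chat_format_single(nullptr, "llama2", history, sys_msg, false);
assert(delta == "[INST] You are a helpful assistant\n"); // matches the new test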