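// Tokenizer regression test: tokenizes a set of reference strings with
// llama_tokenize and checks the results against the expected token ids stored
// next to the vocab file (<vocab>.inp / <vocab>.out). Optionally tokenizes an
// arbitrary text file and dumps the tokens for manual inspection.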
#include "llama.h"
|
||
#include "common.h"
|
||
#include "console.h"
|
||
|
||
#include <cstdio>
|
||
#include <string>
|
||
#include <map>
|
||
#include <vector>
|
||
#include <fstream>
|
||
|
||
//static const std::map<std::string, std::vector<llama_token>> & k_tests() {
//    static std::map<std::string, std::vector<llama_token>> _k_tests = {
//        { ""                      , { }, },
//        { " "                     , { 220, }, },
//        { "  "                    , { 256, }, },
//        { "   "                   , { 262, }, },
//        { "\t"                    , { 197, }, },
//        { "\n"                    , { 198, }, },
//        { "\n\n"                  , { 271, }, },
//        { "\n\n\n"                , { 1432, }, },
//        { "\t\n"                  , { 1602, }, },
//        { "Hello world"           , { 9906, 1917, }, },
//        { " Hello world"          , { 22691, 1917, }, },
//        { "Hello World"           , { 9906, 4435, }, },
//        { " Hello World"          , { 22691, 4435, }, },
//        { " Hello World!"         , { 22691, 4435, 0, }, },
//        { "Hello, world!"         , { 9906, 11, 1917, 0, }, },
//        { " Hello, world!"        , { 22691, 11, 1917, 0, }, },
//        { " this is 🦙.cpp"        , { 420, 374, 11410, 99, 247, 13, 11055, }, },
//        { "w048 7tuijk dsdfhu"    , { 86, 23904, 220, 22, 83, 2005, 42908, 11729, 3013, 17156, }, },
//        { "нещо на Български"     , { 79862, 102118, 13373, 64571, 34694, 3114, 112203, 80112, }, },
//        { "កាន់តែពិសេសអាចខលចេញ"       , { 21549, 222, 98629, 241, 45358, 233, 21549, 237, 45358, 224, 21549, 244, 21549, 115, 21549, 253, 45358, 223, 21549, 253, 21549, 95, 98629, 227, 21549, 223, 21549, 249, 21549, 227, 45358, 223, 21549, 231, }, },
//        { "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", { 9468, 248, 222, 320, 8416, 8, 27623, 114, 102470, 9468, 234, 104, 31643, 320, 36773, 100166, 98634, 8, 26602, 227, 320, 3323, 43465, 430, 706, 1202, 1866, 4037, 8, }, },
//        { "Hello"                 , { 9906, }, },
//        { " Hello"                , { 22691, }, },
//        { "  Hello"               , { 220, 22691, }, },
//        { "   Hello"              , { 256, 22691, }, },
//        { "    Hello"             , { 262, 22691, }, },
//        { "    Hello\n    Hello"  , { 262, 22691, 198, 262, 22691, }, },
//        { " ("                    , { 320, }, },
//        { "\n ="                  , { 198, 284, }, },
//        { "' era"                 , { 6, 11639, }, },
//        { "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~", { 9906, 11, 379, 65948, 0, 2650, 527, 499, 27623, 223, 949, 37046, 101067, 19000, 23182, 102301, 9263, 18136, 16, 36827, 21909, }, },
//        { "3"                     , { 18, }, },
//        { "33"                    , { 1644, }, },
//        { "333"                   , { 8765, }, },
//        { "3333"                  , { 8765, 18, }, },
//        { "33333"                 , { 8765, 1644, }, },
//        { "333333"                , { 8765, 8765, }, },
//        { "3333333"               , { 8765, 8765, 18, }, },
//        { "33333333"              , { 8765, 8765, 1644, }, },
//        { "333333333"             , { 8765, 8765, 8765, }, },
//    };
//
//    return _k_tests;
//}

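// read_tests() loads the reference tests from a pair of files:
//   <vocab>.inp - the test strings, separated by "__ggml_vocab_test__" marker lines
//   <vocab>.out - one line per test with the expected token ids, space-separated
//
// for example (ids taken from the llama-3 reference table above), an .inp with
// the two tests "Hello world" and "3333" pairs with an .out containing:
//
//   9906 1917
//   8765 18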
static std::map<std::string, std::vector<llama_token>> read_tests(const std::string & fname_inp, const std::string & fname_out) {
    std::map<std::string, std::vector<llama_token>> tests;

    std::ifstream ifs_inp(fname_inp);
    if (!ifs_inp) {
        fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_inp.c_str());
        return tests;
    }

    std::string sraw((std::istreambuf_iterator<char>(ifs_inp)), std::istreambuf_iterator<char>());

    std::ifstream ifs_out(fname_out);
    if (!ifs_out) {
        fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
        return tests;
    }

    std::vector<std::string> sout;
    for (std::string line; std::getline(ifs_out, line);) {
        sout.push_back(line);
    }

    const std::string sep = "\n__ggml_vocab_test__\n";

    std::vector<std::string> sinp;

    // split the raw input on the separator marker
    size_t pos = 0;
    while (pos < sraw.size()) {
        const size_t next = sraw.find(sep, pos);
        if (next == std::string::npos) {
            sinp.push_back(sraw.substr(pos));
            break;
        }
        sinp.push_back(sraw.substr(pos, next - pos));
        pos = next + sep.size();
    }

    if (sinp.size() != sout.size()) {
        fprintf(stderr, "%s : error: input and output files have different number of tests\n", __func__);
        return tests;
    }

    for (size_t i = 0; i < sinp.size(); ++i) {
        const std::string & s = sinp[i];
        const std::string & o = string_strip(sout[i]);

        // parse the expected token ids (space-separated integers)
        std::vector<llama_token> toks;

        size_t pos = 0;
        while (pos < o.size()) {
            size_t next = o.find(' ', pos);
            if (next == std::string::npos) {
                next = o.size();
            }
            const std::string stok = o.substr(pos, next - pos);
            toks.push_back(std::stoi(stok));
            pos = next + 1;
        }

        tests[s] = toks;
    }

    return tests;
}

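// sample usage (the binary name and vocab path are illustrative - any GGUF
// with matching .inp/.out files next to it works):
//
//   ./test-tokenizer-0 ./models/ggml-vocab-llama-bpe.gguf
//   ./test-tokenizer-0 ./models/ggml-vocab-llama-bpe.gguf ./some-text.txt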
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    const std::string fname_inp = fname + ".inp";
    const std::string fname_out = fname + ".out";

    std::string fname_text;
    if (argc > 2) {
        fname_text = argv[2];
    }

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init();

    // load the vocab
    {
        auto mparams = llama_model_default_params();

        mparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), mparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        auto cparams = llama_context_default_params();

        ctx = llama_new_context_with_model(model, cparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to create context with vocab '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }

#ifdef _WIN32
    // We need this for unicode console support
    console::init(false, false);
    atexit([]() { console::cleanup(); });
#endif

    bool success = true;

    const auto k_tests = read_tests(fname_inp, fname_out);

    if (k_tests.empty()) {
        fprintf(stderr, "%s : error: no tests found\n", __func__);
        return 1;
    }

    // the reference tokenizations do not include special tokens, so don't add BOS/EOS
    const bool add_special = false;

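    // tokenize each test string and require an exact match with the expected ids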
    for (const auto & test_kv : k_tests) {
        const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);

        printf("\n");
        printf("src: '%s'\n", test_kv.first.c_str());
        printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
        printf("tok: ");
        for (const auto & tok : res) {
            printf("%d ", tok);
        }
        printf("\n");

        bool correct = res.size() == test_kv.second.size();
        for (int i = 0; i < (int) res.size() && correct; ++i) {
            if (test_kv.second[i] != res[i]) {
                correct = false;
            }
        }

        if (!correct) {
            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
            fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
                llama_detokenize_bpe(ctx, res).c_str(),
                llama_detokenize_bpe(ctx, test_kv.second).c_str());
            fprintf(stderr, "%s : expected tokens: ", __func__);
            for (const auto & t : test_kv.second) {
                fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
            }
            fprintf(stderr, "\n");
            fprintf(stderr, "%s : got tokens:      ", __func__);
            for (const auto & t : res) {
                fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
            }
            fprintf(stderr, "\n");

            success = false;
        }
    }

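    // optional mode: tokenize an arbitrary text file and write the result to
    // <text-file>.tokcpp, one "id 'piece'" pair per line (handy, e.g., for
    // comparing against other tokenizer implementations)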
    if (!fname_text.empty()) {
        fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());

        std::string text;
        {
            std::ifstream ifs(fname_text);
            if (!ifs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
                return 1;
            }
            text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
        }

        fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());

        const std::vector<llama_token> res = llama_tokenize(ctx, text, add_special);

        fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());

        {
            const std::string fname_out = fname_text + ".tokcpp";

            std::ofstream ofs(fname_out);
            if (!ofs) {
                fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
                return 1;
            }

            for (const auto & tok : res) {
                ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<int>{tok})) << "'" << std::endl;
            }
        }

        fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
    }

    // free the context before the model it was created from
    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    printf("\n");
    printf("Tests %s\n", success ? "passed" : "failed");

    return success ? 0 : 3;
}