#include "unicode.h"
|
|
#include "unicode-data.h"
|
|
|
|
#include <cassert>
|
|
#include <cstddef>
|
|
#include <cstdint>
|
|
#include <map>
|
|
#include <regex>
|
|
#include <stdexcept>
|
|
#include <string>
|
|
#include <unordered_map>
|
|
#include <unordered_set>
|
|
#include <utility>
|
|
#include <vector>
|
|
#include <locale>
|
|
#include <codecvt>
|
|
|
|
static std::string unicode_cpts_to_utf8(const std::vector<uint32_t> & cps) {
    std::string result;
    for (size_t i = 0; i < cps.size(); ++i) {
        result.append(unicode_cpt_to_utf8(cps[i]));
    }
    return result;
}

static uint32_t unicode_cpt_from_utf8(const std::string & utf8, size_t & offset) {
    assert(offset < utf8.size());
    // 1-byte sequence: 0xxxxxxx
    if (!(utf8[offset + 0] & 0x80)) {
        auto result = utf8[offset + 0];
        offset += 1;
        return result;
    }
    // 10xxxxxx is a continuation byte and must not start a sequence
    if (!(utf8[offset + 0] & 0x40)) {
        throw std::invalid_argument("invalid character");
    }
    // 2-byte sequence: 110xxxxx 10xxxxxx
    if (!(utf8[offset + 0] & 0x20)) {
        if (offset + 1 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80)) {
            throw std::invalid_argument("invalid character");
        }
        auto result = ((utf8[offset + 0] & 0x1f) << 6) | (utf8[offset + 1] & 0x3f);
        offset += 2;
        return result;
    }
    // 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx
    if (!(utf8[offset + 0] & 0x10)) {
        if (offset + 2 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80) || !((utf8[offset + 2] & 0xc0) == 0x80)) {
            throw std::invalid_argument("invalid character");
        }
        auto result = ((utf8[offset + 0] & 0x0f) << 12) | ((utf8[offset + 1] & 0x3f) << 6) | (utf8[offset + 2] & 0x3f);
        offset += 3;
        return result;
    }
    // 4-byte sequence: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
    if (!(utf8[offset + 0] & 0x08)) {
        if (offset + 3 >= utf8.size() || !((utf8[offset + 1] & 0xc0) == 0x80) || !((utf8[offset + 2] & 0xc0) == 0x80) || !((utf8[offset + 3] & 0xc0) == 0x80)) {
            throw std::invalid_argument("invalid character");
        }
        auto result = ((utf8[offset + 0] & 0x07) << 18) | ((utf8[offset + 1] & 0x3f) << 12) | ((utf8[offset + 2] & 0x3f) << 6) | (utf8[offset + 3] & 0x3f);
        offset += 4;
        return result;
    }
    throw std::invalid_argument("failed to convert utf8 to codepoint");
}

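// Illustrative usage (editor's sketch, not called anywhere in this file):
// decoding U+00E9 ('é', encoded as 0xC3 0xA9) returns 0xE9 and advances the
// offset past both bytes:
//   size_t offset = 0;
//   const uint32_t cpt = unicode_cpt_from_utf8("\xC3\xA9", offset); // cpt == 0xE9, offset == 2
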
//static std::vector<uint16_t> unicode_cpt_to_utf16(uint32_t cp) {
//    std::vector<uint16_t> result;
//    if (/* 0x0000 <= cp && */ cp <= 0xffff) {
//        result.emplace_back(cp);
//        return result;
//    }
//    if (0x10000 <= cp && cp <= 0x10ffff) {
//        result.emplace_back(0xd800 | ((cp - 0x10000) >> 10));
//        result.emplace_back(0xdc00 | ((cp - 0x10000) & 0x03ff));
//        return result;
//    }
//    throw std::invalid_argument("failed to convert codepoint to utf16");
//}

//static std::vector<uint16_t> unicode_cpts_to_utf16(const std::vector<uint32_t> & cps) {
//    std::vector<uint16_t> result;
//    for (size_t i = 0; i < cps.size(); ++i) {
//        auto temp = unicode_cpt_to_utf16(cps[i]);
//        result.insert(result.end(), temp.begin(), temp.end());
//    }
//    return result;
//}

//static uint32_t unicode_cpt_from_utf16(const std::vector<uint16_t> & utf16, size_t & offset) {
//    assert(offset < utf16.size());
//    if (((utf16[0] >> 10) << 10) != 0xd800) {
//        auto result = utf16[offset + 0];
//        offset += 1;
//        return result;
//    }
//
//    if (offset + 1 >= utf16.size() || !((utf16[1] & 0xdc00) == 0xdc00)) {
//        throw std::invalid_argument("invalid character");
//    }
//
//    auto result = 0x10000 + (((utf16[0] & 0x03ff) << 10) | (utf16[1] & 0x03ff));
//    offset += 2;
//    return result;
//}

//static std::vector<uint32_t> unicode_cpts_from_utf16(const std::vector<uint16_t> & utf16) {
//    std::vector<uint32_t> result;
//    size_t offset = 0;
//    while (offset < utf16.size()) {
//        result.push_back(unicode_cpt_from_utf16(utf16, offset));
//    }
//    return result;
//}

// build the codepoint -> category lookup table from the ranges in unicode-data.h
static std::unordered_map<uint32_t, int> unicode_cpt_type_map() {
    std::unordered_map<uint32_t, int> cpt_types;
    for (auto p : unicode_ranges_number) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_NUMBER;
        }
    }
    for (auto p : unicode_ranges_letter) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_LETTER;
        }
    }
    for (auto p : unicode_ranges_separator) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_SEPARATOR;
        }
    }
    for (auto p : unicode_ranges_accent_mark) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_ACCENT_MARK;
        }
    }
    for (auto p : unicode_ranges_punctuation) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_PUNCTUATION;
        }
    }
    for (auto p : unicode_ranges_symbol) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_SYMBOL;
        }
    }
    for (auto p : unicode_ranges_control) {
        for (auto i = p.first; i <= p.second; ++i) {
            cpt_types[i] = CODEPOINT_TYPE_CONTROL;
        }
    }
    return cpt_types;
}

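// The next two helpers build the GPT-2 style byte <-> "printable codepoint"
// tables used by byte-level BPE: the 188 printable bytes map to themselves, and
// the remaining 68 bytes are shifted into the range U+0100..U+0143 so that
// every byte has a visible, single-codepoint representation. For example, the
// space byte 0x20 is the 33rd non-printable byte counting from 0x00, so it maps
// to 256 + 32 = U+0120 ('Ġ') -- which is why GPT-2 style vocabularies display
// a leading space as 'Ġ'.
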
static std::unordered_map<uint8_t, std::string> unicode_byte_to_utf8_map() {
    std::unordered_map<uint8_t, std::string> map;
    for (int ch = u'!'; ch <= u'~'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[ch] = unicode_cpt_to_utf8(ch);
    }
    for (int ch = u'¡'; ch <= u'¬'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[ch] = unicode_cpt_to_utf8(ch);
    }
    for (int ch = u'®'; ch <= u'ÿ'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[ch] = unicode_cpt_to_utf8(ch);
    }
    auto n = 0;
    for (int ch = 0; ch < 256; ++ch) {
        if (map.find(ch) == map.end()) {
            map[ch] = unicode_cpt_to_utf8(256 + n);
            ++n;
        }
    }
    return map;
}

static std::unordered_map<std::string, uint8_t> unicode_utf8_to_byte_map() {
    std::unordered_map<std::string, uint8_t> map;
    for (int ch = u'!'; ch <= u'~'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[unicode_cpt_to_utf8(ch)] = ch;
    }
    for (int ch = u'¡'; ch <= u'¬'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[unicode_cpt_to_utf8(ch)] = ch;
    }
    for (int ch = u'®'; ch <= u'ÿ'; ++ch) {
        assert(0 <= ch && ch < 256);
        map[unicode_cpt_to_utf8(ch)] = ch;
    }
    auto n = 0;
    for (int ch = 0; ch < 256; ++ch) {
        if (map.find(unicode_cpt_to_utf8(ch)) == map.end()) {
            map[unicode_cpt_to_utf8(256 + n)] = ch;
            ++n;
        }
    }
    return map;
}

static inline std::wstring unicode_wstring_from_utf8(const std::string & s) {
    // note: std::wstring_convert and std::codecvt_utf8 are deprecated since C++17,
    // but remain the simplest portable way to widen UTF-8 for std::wregex
    std::wstring_convert<std::codecvt_utf8<wchar_t>> conv;
    return conv.from_bytes(s);
}

static std::vector<std::string> unicode_byte_encoding_process(const std::vector<std::string> & bpe_words) {
    std::vector<std::string> bpe_encoded_words;
    for (const auto & word : bpe_words) {
        // round-trip the word through codepoints (this validates the UTF-8)
        std::string text_utf;
        auto utf_word = unicode_cpts_from_utf8(word);
        for (size_t i = 0; i < utf_word.size(); ++i) {
            text_utf += unicode_cpt_to_utf8(utf_word[i]);
        }

        // map every byte of the word to its printable stand-in codepoint
        std::string encoded_token;
        for (char & c : text_utf) {
            encoded_token += unicode_byte_to_utf8(c);
        }
        bpe_encoded_words.emplace_back(encoded_token);
    }
    return bpe_encoded_words;
}

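// Sketch of the byte-encoding step (hypothetical input, not from this file):
//   unicode_byte_encoding_process({" world"}) returns {"Ġworld"}, because the
//   leading space byte 0x20 is replaced by its stand-in codepoint U+0120.
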
// GPT2 system regex: 's|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+
static std::vector<size_t> unicode_regex_split_custom_gpt2(const std::string & text, const std::vector<size_t> & offsets) {
    std::vector<size_t> bpe_offsets; // store the length (in codepoints) of each split chunk
    bpe_offsets.reserve(offsets.size()); // reserve memory for the approximate size

    const auto cpts = unicode_cpts_from_utf8(text);

    size_t start = 0;
    for (auto offset : offsets) {
        const size_t offset_ini = start;
        const size_t offset_end = start + offset;
        assert(offset_end <= cpts.size());
        start = offset_end;

        auto _get_cpt = [&] (const size_t pos) -> char32_t {
            return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : 0;
        };

        auto _get_cpt_type = [&] (const size_t pos) -> int {
            return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_type(cpts[pos]) : CODEPOINT_TYPE_UNIDENTIFIED;
        };

        size_t _prev_end = offset_ini;
        auto _add_token = [&] (const size_t end) -> size_t {
            assert(_prev_end <= end && end <= offset_end);
            size_t len = end - _prev_end;
            if (len > 0) {
                bpe_offsets.push_back(len);
            }
            _prev_end = end;
            //if (len > 0) {
            //    std::string s = "";
            //    for (size_t p = end - len; p < end; p++) {
            //        s += unicode_cpt_to_utf8(cpts[p]);
            //    }
            //    printf(">>> '%s'\n", s.c_str());
            //}
            return len;
        };

        for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) {
            const char32_t cpt = _get_cpt(pos);
            const int cpt_type = _get_cpt_type(pos);

            // regex: 's|'t|'re|'ve|'m|'ll|'d
            if (cpt == '\'' && pos + 1 < offset_end) {
                char32_t cpt_next = _get_cpt(pos + 1);
                if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') {
                    pos += _add_token(pos + 2);
                    continue;
                }
                if (pos + 2 < offset_end) {
                    char32_t cpt_next_next = _get_cpt(pos + 2);
                    if ((cpt_next == 'r' && cpt_next_next == 'e') ||
                        (cpt_next == 'v' && cpt_next_next == 'e') ||
                        (cpt_next == 'l' && cpt_next_next == 'l')) {
                        pos += _add_token(pos + 3);
                        continue;
                    }
                }
            }

            char32_t cpt2 = (cpt == ' ' ? _get_cpt(pos + 1) : cpt);
            int cpt2_type = (cpt == ' ' ? _get_cpt_type(pos + 1) : cpt_type);
            // regex: <space>?\p{L}+
            if (cpt2_type == CODEPOINT_TYPE_LETTER) {
                pos += (cpt == ' ');
                while (cpt2_type == CODEPOINT_TYPE_LETTER) {
                    cpt2_type = _get_cpt_type(++pos);
                }
                _add_token(pos);
                continue;
            }
            // regex: <space>?\p{N}+
            if (cpt2_type == CODEPOINT_TYPE_NUMBER) {
                pos += (cpt == ' ');
                while (cpt2_type == CODEPOINT_TYPE_NUMBER) {
                    cpt2_type = _get_cpt_type(++pos);
                }
                _add_token(pos);
                continue;
            }
            // regex: <space>?[^\s\p{L}\p{N}]+
            if (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
                pos += (cpt == ' ');
                while (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
                    cpt2_type = _get_cpt_type(++pos);
                    cpt2 = _get_cpt(pos);
                }
                _add_token(pos);
                continue;
            }

            size_t num_whitespaces = 0;
            while (unicode_cpt_is_whitespace(_get_cpt(pos + num_whitespaces))) {
                num_whitespaces++;
            }

            // regex: \s+(?!\S)
            if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != 0) {
                pos += num_whitespaces - 1;
                _add_token(pos);
                continue;
            }

            // regex: \s+
            if (num_whitespaces > 0) {
                pos += num_whitespaces;
                _add_token(pos);
                continue;
            }

            // no matches
            _add_token(++pos);
        }
    }

    return bpe_offsets;
}

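// Worked example (hypothetical, for illustration): with text = " Hello world"
// and offsets = {12}, the loop above matches " Hello" and " world" (the
// <space>?\p{L}+ branch, twice) and returns {6, 6} -- chunk lengths in codepoints.
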
// LLAMA3 system regex: "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"
static std::vector<size_t> unicode_regex_split_custom_llama3(const std::string & text, const std::vector<size_t> & offsets) {
    std::vector<size_t> bpe_offsets; // store the length (in codepoints) of each split chunk
    bpe_offsets.reserve(offsets.size()); // reserve memory for the approximate size

    const auto cpts = unicode_cpts_from_utf8(text);

    size_t start = 0;
    for (auto offset : offsets) {
        const size_t offset_ini = start;
        const size_t offset_end = start + offset;
        assert(offset_end <= cpts.size());
        start = offset_end;

        auto _get_cpt = [&] (const size_t pos) -> char32_t {
            return (offset_ini <= pos && pos < offset_end) ? cpts[pos] : 0;
        };

        auto _get_cpt_type = [&] (const size_t pos) -> int {
            return (offset_ini <= pos && pos < offset_end) ? unicode_cpt_type(cpts[pos]) : CODEPOINT_TYPE_UNIDENTIFIED;
        };

        size_t _prev_end = offset_ini;
        auto _add_token = [&] (const size_t end) -> size_t {
            assert(_prev_end <= end && end <= offset_end);
            size_t len = end - _prev_end;
            if (len > 0) {
                bpe_offsets.push_back(len);
            }
            _prev_end = end;
            //if (len > 0) {
            //    std::string s = "";
            //    for (size_t p = end - len; p < end; p++) {
            //        s += unicode_cpt_to_utf8(cpts[p]);
            //    }
            //    printf(">>> '%s'\n", s.c_str());
            //}
            return len;
        };

        for (size_t pos = offset_ini; pos < offset_end; /*pos++*/ ) {
            const char32_t cpt = _get_cpt(pos);
            const int cpt_type = _get_cpt_type(pos);

            // regex: (?i:'s|'t|'re|'ve|'m|'ll|'d) // case insensitive
            if (cpt == '\'' && pos + 1 < offset_end) {
                char32_t cpt_next = unicode_tolower(_get_cpt(pos + 1));
                if (cpt_next == 's' || cpt_next == 't' || cpt_next == 'm' || cpt_next == 'd') {
                    pos += _add_token(pos + 2);
                    continue;
                }
                if (pos + 2 < offset_end) {
                    char32_t cpt_next_next = unicode_tolower(_get_cpt(pos + 2));
                    if ((cpt_next == 'r' && cpt_next_next == 'e') ||
                        (cpt_next == 'v' && cpt_next_next == 'e') ||
                        (cpt_next == 'l' && cpt_next_next == 'l')) {
                        pos += _add_token(pos + 3);
                        continue;
                    }
                }
            }

            // regex: [^\r\n\p{L}\p{N}]?\p{L}+  // FIXME: is the handling of the first \p{L} correct?
            if (cpt != '\r' && cpt != '\n' && /*cpt_type != CODEPOINT_TYPE_LETTER &&*/ cpt_type != CODEPOINT_TYPE_NUMBER) {
                if (cpt_type == CODEPOINT_TYPE_LETTER || _get_cpt_type(pos + 1) == CODEPOINT_TYPE_LETTER) { // one or more letters
                    pos++;
                    while (_get_cpt_type(pos) == CODEPOINT_TYPE_LETTER) {
                        pos++;
                    }
                    _add_token(pos);
                    continue;
                }
            }

            // regex: \p{N}{1,3}
            if (cpt_type == CODEPOINT_TYPE_NUMBER) {
                size_t ini = pos;
                while (_get_cpt_type(pos) == CODEPOINT_TYPE_NUMBER) {
                    if (++pos - ini >= 3) {
                        _add_token(pos);
                        ini = pos;
                    }
                }
                _add_token(pos);
                continue;
            }

            // regex: <space>?[^\s\p{L}\p{N}]+[\r\n]*
            char32_t cpt2 = (cpt == ' ' ? _get_cpt(pos + 1) : cpt);
            int cpt2_type = (cpt == ' ' ? _get_cpt_type(pos + 1) : cpt_type);
            if (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
                pos += (cpt == ' ');
                while (!unicode_cpt_is_whitespace(cpt2) && cpt2_type != CODEPOINT_TYPE_LETTER && cpt2_type != CODEPOINT_TYPE_NUMBER && cpt2_type != CODEPOINT_TYPE_UNIDENTIFIED) {
                    cpt2_type = _get_cpt_type(++pos);
                    cpt2 = _get_cpt(pos);
                }
                while (cpt2 == '\r' || cpt2 == '\n') {
                    cpt2 = _get_cpt(++pos);
                }
                _add_token(pos);
                continue;
            }

            size_t num_whitespaces = 0;
            size_t last_end_r_or_n = 0;
            while (unicode_cpt_is_whitespace(_get_cpt(pos + num_whitespaces))) {
                char32_t cpt2 = _get_cpt(pos + num_whitespaces);
                if (cpt2 == '\r' || cpt2 == '\n') {
                    last_end_r_or_n = pos + num_whitespaces + 1;
                }
                num_whitespaces++;
            }

            // regex: \s*[\r\n]+
            if (last_end_r_or_n > 0) {
                pos = last_end_r_or_n;
                _add_token(pos);
                continue;
            }

            // regex: \s+(?!\S)
            if (num_whitespaces > 1 && _get_cpt(pos + num_whitespaces) != 0) {
                pos += num_whitespaces - 1;
                _add_token(pos);
                continue;
            }

            // regex: \s+
            if (num_whitespaces > 0) {
                pos += num_whitespaces;
                _add_token(pos);
                continue;
            }

            // no matches
            _add_token(++pos);
        }
    }

    return bpe_offsets;
}

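// Worked example (hypothetical): the \p{N}{1,3} branch above chunks digit runs
// from the left in groups of three, so "12345" splits into "123" + "45",
// i.e. chunk lengths {3, 2}.
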
// use std::wregex to split the text
static std::vector<size_t> unicode_regex_split_stl(const std::wstring & wtext, const std::wstring & regex_expr, const std::vector<size_t> & offsets) {
    std::wregex expr(regex_expr);
    std::vector<size_t> bpe_offsets; // store the length of each chunk
    bpe_offsets.reserve(offsets.size()); // reserve memory for the approximate size
    size_t start = 0;
    for (auto offset : offsets) {
        std::wcregex_iterator it(wtext.data() + start, wtext.data() + start + offset, expr);
        std::wcregex_iterator end;

        int64_t start_idx = 0;
        while (it != end) {
            std::wcmatch match = *it;
            if (match.position() > start_idx) {
                bpe_offsets.emplace_back(match.position() - start_idx);
            }
            bpe_offsets.emplace_back(match.length());
            start_idx = match.position() + match.length();
            ++it;
        }

        if (start_idx < (int64_t) offset) {
            bpe_offsets.emplace_back(offset - start_idx);
        }
        start += offset;
    }

    return bpe_offsets;
}

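// Two overloads of unicode_regex_split_stl: the std::wstring version above runs
// on the widened full text (regexes without \p{...} classes), while the
// std::string version below runs on the "collapsed" single-byte representation
// built in unicode_regex_split().
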
// use std::regex to split the text
static std::vector<size_t> unicode_regex_split_stl(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) {
    std::regex expr(regex_expr);
    std::vector<size_t> bpe_offsets; // store the length of each chunk
    bpe_offsets.reserve(offsets.size()); // reserve memory for the approximate size
    size_t start = 0;
    for (auto offset : offsets) {
        std::cregex_iterator it(text.data() + start, text.data() + start + offset, expr);
        std::cregex_iterator end;

        int64_t start_idx = 0;
        while (it != end) {
            std::cmatch match = *it;
            if (match.position() > start_idx) {
                bpe_offsets.emplace_back(match.position() - start_idx);
            }
            bpe_offsets.emplace_back(match.length());
            start_idx = match.position() + match.length();
            ++it;
        }

        if (start_idx < (int64_t) offset) {
            bpe_offsets.emplace_back(offset - start_idx);
        }
        start += offset;
    }

    return bpe_offsets;
}

static std::vector<size_t> unicode_regex_split_custom(const std::string & text, const std::string & regex_expr, const std::vector<size_t> & offsets) {
    std::vector<size_t> bpe_offsets;

    if (regex_expr == "'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)") {
        bpe_offsets = unicode_regex_split_custom_gpt2(text, offsets);
    } else if (
            regex_expr == "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+" ||
            regex_expr == "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+") {

        bpe_offsets = unicode_regex_split_custom_llama3(text, offsets);
    }

    // an empty result means there is no custom implementation for this regex;
    // the caller then falls back to std::regex / std::wregex
    return bpe_offsets;
}

//
// interface
//

std::string unicode_cpt_to_utf8(uint32_t cp) {
    std::string result;

    if (/* 0x00 <= cp && */ cp <= 0x7f) {
        result.push_back(cp);
        return result;
    }
    if (0x80 <= cp && cp <= 0x7ff) {
        result.push_back(0xc0 | ((cp >> 6) & 0x1f));
        result.push_back(0x80 | (cp & 0x3f));
        return result;
    }
    if (0x800 <= cp && cp <= 0xffff) {
        result.push_back(0xe0 | ((cp >> 12) & 0x0f));
        result.push_back(0x80 | ((cp >> 6) & 0x3f));
        result.push_back(0x80 | (cp & 0x3f));
        return result;
    }
    if (0x10000 <= cp && cp <= 0x10ffff) {
        result.push_back(0xf0 | ((cp >> 18) & 0x07));
        result.push_back(0x80 | ((cp >> 12) & 0x3f));
        result.push_back(0x80 | ((cp >> 6) & 0x3f));
        result.push_back(0x80 | (cp & 0x3f));
        return result;
    }

    throw std::invalid_argument("invalid codepoint");
}

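// Illustrative round-trip (editor's sketch): unicode_cpt_to_utf8(0x1F600)
// produces the 4-byte sequence "\xF0\x9F\x98\x80" (😀), and feeding that back
// through unicode_cpt_from_utf8() returns 0x1F600.
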
std::vector<uint32_t> unicode_cpts_normalize_nfd(const std::vector<uint32_t> & cpts) {
    std::vector<uint32_t> result;
    result.reserve(cpts.size());
    for (size_t i = 0; i < cpts.size(); ++i) {
        auto it = unicode_map_nfd.find(cpts[i]);
        if (it == unicode_map_nfd.end()) {
            result.push_back(cpts[i]);
        } else {
            result.push_back(it->second);
        }
    }
    return result;
}

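// Note: unicode_map_nfd maps a single codepoint to a single codepoint, so this
// appears to be only an approximation of full NFD normalization (canonical
// decomposition can expand one codepoint into several).
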
std::vector<uint32_t> unicode_cpts_from_utf8(const std::string & utf8) {
    std::vector<uint32_t> result;
    size_t offset = 0;
    while (offset < utf8.size()) {
        result.push_back(unicode_cpt_from_utf8(utf8, offset));
    }
    return result;
}

int unicode_cpt_type(uint32_t cp) {
    static std::unordered_map<uint32_t, int> cpt_types = unicode_cpt_type_map();
    const auto it = cpt_types.find(cp);
    return it == cpt_types.end() ? CODEPOINT_TYPE_UNIDENTIFIED : it->second;
}

int unicode_cpt_type(const std::string & utf8) {
    if (utf8.length() == 0) {
        return CODEPOINT_TYPE_UNIDENTIFIED;
    }
    size_t offset = 0;
    return unicode_cpt_type(unicode_cpt_from_utf8(utf8, offset));
}

bool unicode_cpt_is_whitespace(uint32_t cp) {
    static const std::unordered_set<uint32_t> is_whitespace = [] {
        std::unordered_set<uint32_t> is_whitespace;
        for (auto p : unicode_ranges_whitespace) {
            for (auto i = p.first; i <= p.second; ++i) {
                is_whitespace.insert(i);
            }
        }
        return is_whitespace;
    }();
    return (bool) is_whitespace.count(cp);
}

std::string unicode_byte_to_utf8(uint8_t byte) {
    static std::unordered_map<uint8_t, std::string> map = unicode_byte_to_utf8_map();
    return map.at(byte);
}

uint8_t unicode_utf8_to_byte(const std::string & utf8) {
    static std::unordered_map<std::string, uint8_t> map = unicode_utf8_to_byte_map();
    return map.at(utf8);
}

char32_t unicode_tolower(char32_t cp) {
    auto it = unicode_map_lowercase.find(cp);
    return it == unicode_map_lowercase.end() ? cp : it->second;
}

std::vector<std::string> unicode_regex_split(const std::string & text, const std::vector<std::string> & regex_exprs) {
    // unicode categories
    static const std::map<std::string, int> k_ucat_enum = {
        { "\\p{N}", CODEPOINT_TYPE_NUMBER },
        { "\\p{L}", CODEPOINT_TYPE_LETTER },
        { "\\p{P}", CODEPOINT_TYPE_PUNCTUATION },
    };

    // single-byte stand-ins used for each category in the "collapsed" text
    static const std::map<int, int> k_ucat_cpt = {
        { CODEPOINT_TYPE_NUMBER,      0xD1 },
        { CODEPOINT_TYPE_LETTER,      0xD2 },
        { CODEPOINT_TYPE_PUNCTUATION, 0xD3 },
    };

    // ASCII ranges appended next to each stand-in when rewriting the regex,
    // since ASCII codepoints are kept as-is in the collapsed text
    static const std::map<int, std::string> k_ucat_map = {
        { CODEPOINT_TYPE_NUMBER,      "\x30-\x39" },          // 0-9
        { CODEPOINT_TYPE_LETTER,      "\x41-\x5A\x61-\x7A" }, // A-Za-z
        { CODEPOINT_TYPE_PUNCTUATION, "\x21-\x23\x25-\x2A\x2C-\x2F\x3A-\x3B\x3F-\x40\\\x5B-\\\x5D\x5F\\\x7B\\\x7D" }, // !-#%-*,-/:-;?-@\[-\]_\{\}
    };

    // compute collapsed codepoints only if needed by at least one regex
    bool need_collapse = false;
    for (auto & regex_expr : regex_exprs) {
        // search for unicode categories
        for (const auto & ucat : k_ucat_enum) {
            if (std::string::npos != regex_expr.find(ucat.first)) {
                need_collapse = true;
                break;
            }
        }
    }

    const auto cpts = unicode_cpts_from_utf8(text);

    // generate a "collapsed" representation of the text, where all codepoints are replaced by a single byte
    // ref: https://github.com/ggerganov/llama.cpp/pull/6920#issuecomment-2081479935
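    // e.g. (illustrative): "ab1á!" collapses to "ab1\xD2!" -- ASCII bytes are
    // kept as-is, while the non-ASCII letter 'á' becomes the letter stand-in 0xD2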
    std::string text_collapsed;
    if (need_collapse) {
        // collapse all unicode categories
        text_collapsed.resize(cpts.size());

        for (size_t i = 0; i < cpts.size(); ++i) {
            // keep single-byte codepoints as is
            if (cpts[i] < 128) {
                text_collapsed[i] = cpts[i];
                continue;
            }

            const int cpt_type = unicode_cpt_type(cpts[i]);

            if (k_ucat_cpt.find(cpt_type) != k_ucat_cpt.end()) {
                text_collapsed[i] = k_ucat_cpt.at(cpt_type);
            } else {
                text_collapsed[i] = (char) 0xD0; // fallback
            }
        }
    }

    std::vector<size_t> bpe_offsets = { cpts.size() };

    for (auto & regex_expr : regex_exprs) {
        // first, see if we have an efficient custom regex implementation
        auto tmp = unicode_regex_split_custom(text, regex_expr, bpe_offsets);

        if (!tmp.empty()) {
            bpe_offsets = std::move(tmp);
            continue;
        }

        // fallback to general-purpose std::regex / std::wregex
        try {
            // if a unicode category is used in the regex, we use the collapsed text and replace the unicode category
            // with the corresponding collapsed representation
            bool use_collapsed = false;
            for (auto & ucat : k_ucat_enum) {
                if (std::string::npos != regex_expr.find(ucat.first)) {
                    use_collapsed = true;
                    break;
                }
            }

            if (use_collapsed) {
                // sanity-check that the original regex does not contain any non-ASCII characters
                const auto cpts_regex = unicode_cpts_from_utf8(regex_expr);
                for (size_t i = 0; i < cpts_regex.size(); ++i) {
                    if (cpts_regex[i] >= 128) {
                        throw std::runtime_error("Regex includes both unicode categories and non-ASCII characters - not supported");
                    }
                }

                // generate a collapsed representation of the regex
                std::string regex_expr_collapsed;

                // track if we are inside [], because nested [] are not allowed
                bool inside = false;
                for (size_t i = 0; i < regex_expr.size(); ++i) {
                    if (regex_expr[i] == '[' && (i == 0 || regex_expr[i - 1] != '\\')) {
                        regex_expr_collapsed += '[';
                        inside = true;
                        continue;
                    }

                    if (inside && regex_expr[i] == ']' && regex_expr[i - 1] != '\\') {
                        regex_expr_collapsed += ']';
                        inside = false;
                        continue;
                    }

                    if (regex_expr[i + 0] == '\\' && i + 4 < regex_expr.size() &&
                        regex_expr[i + 1] == 'p' &&
                        regex_expr[i + 2] == '{' &&
                        regex_expr[i + 4] == '}') {
                        const std::string pat = regex_expr.substr(i, 5);
                        if (k_ucat_enum.find(pat) != k_ucat_enum.end()) {
                            if (!inside) {
                                regex_expr_collapsed += '[';
                            }
                            regex_expr_collapsed += k_ucat_cpt.at(k_ucat_enum.at(pat));
                            regex_expr_collapsed += k_ucat_map.at(k_ucat_enum.at(pat));
                            if (!inside) {
                                regex_expr_collapsed += ']';
                            }
                            i += 4;
                            continue;
                        }
                    }

                    regex_expr_collapsed += regex_expr[i];
                }

                //printf("text_collapsed: %s\n", text_collapsed.c_str());
                //printf("regex_expr_collapsed: %s\n", regex_expr_collapsed.c_str());
                bpe_offsets = unicode_regex_split_stl(text_collapsed, regex_expr_collapsed, bpe_offsets);
            } else {
                // no unicode category used, we can use std::wregex directly
                const std::wstring wtext = unicode_wstring_from_utf8(text);
                const std::wstring wregex_expr = unicode_wstring_from_utf8(regex_expr);

                //printf("text: %s\n", text.c_str());
                //printf("regex_expr: %s\n", regex_expr.c_str());
                bpe_offsets = unicode_regex_split_stl(wtext, wregex_expr, bpe_offsets);
            }
        } catch (std::regex_error & e) {
            fprintf(stderr, "Failed to process regex: '%s'\n", regex_expr.c_str());
            fprintf(stderr, "Regex error: %s\n", e.what());
            throw std::runtime_error("Failed to process regex");
        }
    }

    std::vector<std::string> bpe_words;
    bpe_words.reserve(bpe_offsets.size()); // reserve memory for the approximate size

    size_t start = 0;
    for (size_t & offset : bpe_offsets) {
        bpe_words.emplace_back();
        for (size_t i = start; i < start + offset; ++i) {
            bpe_words.back() += unicode_cpt_to_utf8(cpts[i]);
        }
        start += offset;
    }

    return unicode_byte_encoding_process(bpe_words);
}
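
// End-to-end sketch (hypothetical call, assuming the GPT-2 pre-tokenizer regex):
//   unicode_regex_split("Hello world!",
//       {"'s|'t|'re|'ve|'m|'ll|'d| ?\\p{L}+| ?\\p{N}+| ?[^\\s\\p{L}\\p{N}]+|\\s+(?!\\S)"})
//   returns {"Hello", "Ġworld", "!"}: the chunk " world" keeps its leading
//   space as 'Ġ' (U+0120) after byte-encoding.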