Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-11 13:30:35 +00:00)

llama : make llm_tokenizer more private

ggml-ci

This commit is contained in: parent 2ec25dbf27, commit 02629d98f1
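The diff below touches the tokenizer implementation, the tokenizer header, and the model loading code. It moves the llm_tokenizer definition out of the header (leaving only a forward declaration), gives llama_vocab ownership of the tokenizer (a new llama_vocab::init_tokenizer() allocates it and ~llama_vocab() deletes it), drops the free-standing llama_create_tokenizer() helper, and builds the per-call *_session helpers from the vocab instead of from a tokenizer reference. A hedged sketch of the resulting flow, condensed from the hunks below rather than copied from the repository:

    // sketch only: assumes a llama_vocab whose token tables were already
    // filled during model loading (see llm_load_vocab further down)
    llama_vocab vocab;
    // ... load tokens, merges and special ids into vocab ...
    vocab.init_tokenizer();                   // allocates the type-specific llm_tokenizer_* behind vocab.tokenizer

    std::vector<llama_vocab::id> out;
    llm_tokenizer_spm_session session(vocab); // sessions now read vocab.tokenizer internally
    session.tokenize("example text", out);
    // the tokenizer is freed in ~llama_vocab()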
@@ -79,6 +79,15 @@ struct naive_trie {
 // impl
 //
 
+struct llm_tokenizer {
+    llm_tokenizer() {}
+    virtual ~llm_tokenizer() = default;
+};
+
+llama_vocab::~llama_vocab() {
+    delete tokenizer;
+}
+
 int llama_vocab::find_bpe_rank(const std::string & token_left, const std::string & token_right) const {
     GGML_ASSERT(token_left.find(' ') == std::string::npos);
     GGML_ASSERT(token_left.find('\n') == std::string::npos);
@@ -188,13 +197,12 @@ struct llm_bigram_spm {
 };
 
 struct llm_tokenizer_spm : llm_tokenizer {
-    llm_tokenizer_spm(const llama_vocab & vocab) : llm_tokenizer(vocab) {}
+    llm_tokenizer_spm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
 };
 
 struct llm_tokenizer_spm_session {
-    llm_tokenizer_spm_session(const llm_tokenizer & tokenizer) :
-        spm_tokenizer(static_cast<const llm_tokenizer_spm &>(tokenizer)) {}
+    llm_tokenizer_spm_session(const llama_vocab & vocab) : vocab(vocab),
+        spm_tokenizer(static_cast<const llm_tokenizer_spm *>(vocab.tokenizer)) {}
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
 
@@ -257,7 +265,6 @@ struct llm_tokenizer_spm_session {
 
 private:
     void resegment(llm_symbol & symbol, std::vector<llama_vocab::id> & output) {
-        const auto & vocab = spm_tokenizer.vocab;
         auto text = std::string(symbol.text, symbol.n);
         auto token = vocab.token_to_id.find(text);
 
@@ -287,7 +294,6 @@ private:
         if (left == -1 || right == -1) {
            return;
        }
-        const auto & vocab = spm_tokenizer.vocab;
        const std::string text = std::string(symbols[left].text, symbols[left].n + symbols[right].n);
        auto token = vocab.token_to_id.find(text);
 
@@ -313,7 +319,8 @@ private:
        rev_merge[text] = std::make_pair(left, right);
    }
 
-    const llm_tokenizer_spm & spm_tokenizer;
+    const llama_vocab & vocab;
+    const llm_tokenizer_spm * spm_tokenizer; // currently unused
 
    std::vector<llm_symbol> symbols;
    llm_bigram_spm::queue work_queue;
@@ -360,7 +367,7 @@ struct llm_bigram_bpe {
 };
 
 struct llm_tokenizer_bpe : llm_tokenizer {
-    llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer(vocab) {
+    llm_tokenizer_bpe(const llama_vocab & vocab) : llm_tokenizer() {
         GGML_ASSERT(vocab.type == LLAMA_VOCAB_TYPE_BPE);
         switch (vocab.type_pre) {
             case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
@@ -473,16 +480,14 @@ struct llm_tokenizer_bpe : llm_tokenizer {
 };
 
 struct llm_tokenizer_bpe_session {
-    llm_tokenizer_bpe_session(const llm_tokenizer & tokenizer) :
-        bpe_tokenizer(static_cast<const llm_tokenizer_bpe &>(tokenizer)) {}
+    llm_tokenizer_bpe_session(const llama_vocab & vocab) : vocab(vocab),
+        bpe_tokenizer(static_cast<const llm_tokenizer_bpe *>(vocab.tokenizer)) {}
 
-    void append(const llama_vocab::id token_id, std::vector<llama_vocab::id> & output) const {
+    static void append(const llama_vocab::id token_id, std::vector<llama_vocab::id> & output) {
         output.push_back(token_id);
     }
 
     bool append_bos(std::vector<llama_vocab::id> & output) const {
-        const auto & vocab = bpe_tokenizer.vocab;
         if (vocab.tokenizer_add_bos) {
             GGML_ASSERT(vocab.special_bos_id != -1);
             output.push_back(vocab.special_bos_id);
@@ -492,7 +497,6 @@ struct llm_tokenizer_bpe_session {
     }
 
     bool append_eos(std::vector<llama_vocab::id> & output) const {
-        const auto & vocab = bpe_tokenizer.vocab;
         if (vocab.tokenizer_add_eos) {
             GGML_ASSERT(vocab.special_eos_id != -1);
             output.push_back(vocab.special_eos_id);
@@ -502,7 +506,6 @@ struct llm_tokenizer_bpe_session {
     }
 
     void check_double_bos_eos(const std::vector<llama_vocab::id> & output) const {
-        const auto & vocab = bpe_tokenizer.vocab;
         if (vocab.tokenizer_add_bos && output.size() >= 2 && output[1] == vocab.special_bos_id) {
             LLAMA_LOG_WARN(
                 "%s: Added a BOS token to the prompt as specified by the model but the prompt "
@@ -519,12 +522,11 @@ struct llm_tokenizer_bpe_session {
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         int final_prev_index = -1;
-        const auto word_collection = unicode_regex_split(text, bpe_tokenizer.regex_exprs);
-        const auto & vocab = bpe_tokenizer.vocab;
+        const auto word_collection = unicode_regex_split(text, bpe_tokenizer->regex_exprs);
 
         symbols_final.clear();
 
-        for (auto & word : word_collection) {
+        for (const auto & word : word_collection) {
             work_queue = llm_bigram_bpe::queue();
             symbols.clear();
@@ -627,7 +629,6 @@ private:
         if (left == -1 || right == -1) {
             return;
         }
-        const auto & vocab = bpe_tokenizer.vocab;
         std::string left_token = std::string(symbols[left].text, symbols[left].n);
         std::string right_token = std::string(symbols[right].text, symbols[right].n);
@@ -650,7 +651,8 @@ private:
         work_queue.push(bigram);
     }
 
-    const llm_tokenizer_bpe & bpe_tokenizer;
+    const llama_vocab & vocab;
+    const llm_tokenizer_bpe * bpe_tokenizer;
 
     std::vector<llm_symbol> symbols;
     std::vector<llm_symbol> symbols_final;
@@ -662,16 +664,14 @@ private:
 //
 
 struct llm_tokenizer_wpm : llm_tokenizer {
-    llm_tokenizer_wpm(const llama_vocab & vocab) : llm_tokenizer(vocab) {}
+    llm_tokenizer_wpm(const llama_vocab & /*vocab*/) : llm_tokenizer() {}
 };
 
 struct llm_tokenizer_wpm_session {
-    llm_tokenizer_wpm_session(const llm_tokenizer & tokenizer)
-        : wpm_tokenizer(static_cast<const llm_tokenizer_wpm &>(tokenizer)) {}
+    llm_tokenizer_wpm_session(const llama_vocab & vocab) : vocab(vocab),
+        wpm_tokenizer(static_cast<const llm_tokenizer_wpm *>(vocab.tokenizer)) {}
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
-        const auto & vocab = wpm_tokenizer.vocab;
         const auto & token_map = vocab.token_to_id;
         // normalize and split by whitespace
         std::vector<std::string> words = preprocess(text);
@@ -719,7 +719,7 @@ struct llm_tokenizer_wpm_session {
     }
 
     // TODO: reduce string copies by using cpts_offs array
-    std::vector<std::string> preprocess(const std::string & text) const {
+    static std::vector<std::string> preprocess(const std::string & text) {
         const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
         std::vector<std::string> words(1, "");
@@ -772,7 +772,8 @@ struct llm_tokenizer_wpm_session {
     }
 
 private:
-    const llm_tokenizer_wpm & wpm_tokenizer;
+    const llama_vocab & vocab;
+    const llm_tokenizer_wpm * wpm_tokenizer;
 };
 
 //
@@ -780,7 +781,7 @@ private:
 //
 
 struct llm_tokenizer_ugm : llm_tokenizer {
-    llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer(vocab) {
+    llm_tokenizer_ugm(const llama_vocab & vocab) : llm_tokenizer() {
         if (vocab.precompiled_charsmap.size() > 0) {
             size_t charsmap_offset = 0;
@@ -847,9 +848,8 @@ struct llm_tokenizer_ugm : llm_tokenizer {
 };
 
 struct llm_tokenizer_ugm_session {
-    llm_tokenizer_ugm_session(const llm_tokenizer & tokenizer)
-        : ugm_tokenizer(static_cast<const llm_tokenizer_ugm &>(tokenizer)) {}
+    llm_tokenizer_ugm_session(const llama_vocab & vocab) : vocab(vocab),
+        ugm_tokenizer(static_cast<const llm_tokenizer_ugm *>(vocab.tokenizer)) {}
 
     /* This implementation is based on SentencePiece optimized Viterbi algorithm for
      * unigram language models. The general idea is to:
@@ -867,7 +867,6 @@ struct llm_tokenizer_ugm_session {
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         // get current size of output (for reversal later)
         size_t output_size = output.size();
-        const auto & vocab = ugm_tokenizer.vocab;
 
         // normalize the input first
         std::string normalized;
@@ -890,7 +889,7 @@ struct llm_tokenizer_ugm_session {
             // traverse the token matcher trie to find a matching token
             bool single_codepoint_token_found = false;
             const struct best_tokenization & current_best = tokenization_results[input_offset];
-            const struct naive_trie * node = ugm_tokenizer.token_matcher.traverse(normalized[prefix_offset++]);
+            const struct naive_trie * node = ugm_tokenizer->token_matcher.traverse(normalized[prefix_offset++]);
 
             while (prefix_offset <= input_len && node != NULL) {
                 // check if we found valid token in prefix
@@ -920,7 +919,7 @@ struct llm_tokenizer_ugm_session {
             // if we didn't find a valid token corresponding to the whole UTF code point
             // then use unknown token as the tokenization of this UTF code point
             if (!single_codepoint_token_found) {
-                const double challenger_score = current_best.score_sum + ugm_tokenizer.unknown_token_score;
+                const double challenger_score = current_best.score_sum + ugm_tokenizer->unknown_token_score;
                 prefix_offset = input_offset + n_utf8_code_units;
                 struct best_tokenization & current_champ = tokenization_results[prefix_offset];
                 if (challenger_score > current_champ.score_sum) {
@@ -963,9 +962,8 @@ private:
     void normalize(const std::string& input, std::string * normalized) {
         normalized->clear();
         normalized->reserve(input.size() * 3);
-        const auto & vocab = ugm_tokenizer.vocab;
 
-        const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer.escaped_space : " ";
+        const std::string space = vocab.tokenizer_escape_whitespaces ? ugm_tokenizer->escaped_space : " ";
 
         bool shall_prepend_space = !vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
         bool shall_append_space = vocab.tokenizer_treat_whitespace_as_suffix && vocab.tokenizer_add_space_prefix;
@@ -1061,7 +1059,7 @@ private:
 
         // if input prefix matches some user-defined token return this token as normalization result
         auto user_defined_token_match =
-            ugm_tokenizer.user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
+            ugm_tokenizer->user_defined_token_matcher.get_longest_prefix(&input[input_offset], input.size() - input_offset);
         if (user_defined_token_match.second > 0) {
             return { &input[input_offset], user_defined_token_match.second, user_defined_token_match.second };
         }
@@ -1069,8 +1067,8 @@ private:
         size_t longest_prefix_length = 0;
         size_t longest_prefix_offset = 0;
 
-        if (ugm_tokenizer.xcda_array_size > 0) {
-            struct xcda_array_view xcda_view(ugm_tokenizer.xcda_array, ugm_tokenizer.xcda_array_size);
+        if (ugm_tokenizer->xcda_array_size > 0) {
+            struct xcda_array_view xcda_view(ugm_tokenizer->xcda_array, ugm_tokenizer->xcda_array_size);
 
             // Find the longest normalized sequence matching the input prefix by walking
             // the XOR-compressed compact double array (XCDA) starting from the root node
@@ -1106,26 +1104,27 @@ private:
 
         if (longest_prefix_length > 0) {
             // we have a match, so return the replacement sequence
-            if (longest_prefix_offset >= ugm_tokenizer.prefix_replacements_size) {
+            if (longest_prefix_offset >= ugm_tokenizer->prefix_replacements_size) {
                 throw std::runtime_error("Index out of array bounds in precompiled charsmap!");
             }
-            const char * prefix_replacement = &(ugm_tokenizer.prefix_replacements)[longest_prefix_offset];
+            const char * prefix_replacement = &(ugm_tokenizer->prefix_replacements)[longest_prefix_offset];
             return { prefix_replacement, strlen(prefix_replacement), longest_prefix_length };
-        } else {
-            // check if the input prefix contains a valid sequence of UTF-8 code units
-            try {
-                // if yes, return this sequence unmodified
-                size_t prefix_offset = input_offset;
-                unicode_cpt_from_utf8(input, prefix_offset);
-                return { &input[input_offset], prefix_offset - input_offset, prefix_offset - input_offset };
-            } catch (std::invalid_argument & /*ex*/) {
-                // if no, consume 1 byte and return U+FFFD - REPLACEMENT CHARACTER
-                return { "\xEF\xBF\xBD", 3, 1 };
-            }
-        }
+        }
+
+        // check if the input prefix contains a valid sequence of UTF-8 code units
+        try {
+            // if yes, return this sequence unmodified
+            size_t prefix_offset = input_offset;
+            unicode_cpt_from_utf8(input, prefix_offset);
+            return { &input[input_offset], prefix_offset - input_offset, prefix_offset - input_offset };
+        } catch (std::invalid_argument & /*ex*/) {
+            // if no, consume 1 byte and return U+FFFD - REPLACEMENT CHARACTER
+            return { "\xEF\xBF\xBD", 3, 1 };
+        }
     }
 
-    const llm_tokenizer_ugm & ugm_tokenizer;
+    const llama_vocab & vocab;
+    const llm_tokenizer_ugm * ugm_tokenizer;
 };
 
 //
@@ -1187,7 +1186,7 @@ static std::vector<uint8_t> llama_unescape_rwkv_token(const std::string & escape
 }
 
 struct llm_tokenizer_rwkv : llm_tokenizer {
-    llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer(vocab) {
+    llm_tokenizer_rwkv(const llama_vocab & vocab) : llm_tokenizer() {
         // RWKV supports arbitrary byte tokens, but the vocab struct only supports string tokens.
         // For now, we decode the vocab here into the lookup we'll use for tokenization.
@@ -1203,13 +1202,11 @@ struct llm_tokenizer_rwkv : llm_tokenizer {
 };
 
 struct llm_tokenizer_rwkv_session {
-    llm_tokenizer_rwkv_session(const llm_tokenizer & tokenizer)
-        : rwkv_tokenizer(static_cast<const llm_tokenizer_rwkv &>(tokenizer)) {}
+    llm_tokenizer_rwkv_session(const llama_vocab & vocab) : vocab(vocab),
+        rwkv_tokenizer(static_cast<const llm_tokenizer_rwkv &>(*vocab.tokenizer)) {}
 
     void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
         uint32_t position = 0;
-        const auto & vocab = rwkv_tokenizer.vocab;
         while (position < text.size()) {
             const struct naive_trie * node = rwkv_tokenizer.token_matcher.traverse(text[position]);
             if (node == NULL) {
@@ -1237,9 +1234,32 @@ struct llm_tokenizer_rwkv_session {
     }
 
 private:
+    const llama_vocab & vocab;
     const llm_tokenizer_rwkv & rwkv_tokenizer;
 };
 
+void llama_vocab::init_tokenizer() {
+    switch (type) {
+        case LLAMA_VOCAB_TYPE_SPM:
+            tokenizer = new llm_tokenizer_spm(*this);
+            break;
+        case LLAMA_VOCAB_TYPE_BPE:
+            tokenizer = new llm_tokenizer_bpe(*this);
+            break;
+        case LLAMA_VOCAB_TYPE_WPM:
+            tokenizer = new llm_tokenizer_wpm(*this);
+            break;
+        case LLAMA_VOCAB_TYPE_UGM:
+            tokenizer = new llm_tokenizer_ugm(*this);
+            break;
+        case LLAMA_VOCAB_TYPE_RWKV:
+            tokenizer = new llm_tokenizer_rwkv(*this);
+            break;
+        default:
+            GGML_ABORT("unsupported vocab type");
+    }
+}
+
 //
 // (de-) tokenize
 //
@@ -1301,7 +1321,7 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
 
             // if a fragment is text ( not yet processed )
             if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
-                auto & raw_text = fragment.raw_text;
+                const auto & raw_text = fragment.raw_text;
 
                 auto raw_text_base_offset = fragment.offset;
                 auto raw_text_base_length = fragment.length;
@@ -1400,11 +1420,15 @@ static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<
     }
 }
 
-std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * tokenizer,
-        std::string raw_text, bool add_special, bool parse_special) {
+std::vector<llama_vocab::id> llama_tokenize_internal(
+        const llama_vocab & vocab,
+        std::string raw_text,
+        bool add_special,
+        bool parse_special) {
+    GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
+
     std::vector<llama_vocab::id> output;
     std::forward_list<fragment_buffer_variant> fragment_buffer;
-    const llama_vocab & vocab = tokenizer->vocab;
 
     if (!raw_text.empty()) {
         fragment_buffer.emplace_front(raw_text, 0, raw_text.length());
@@ -1440,7 +1464,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
                         LLAMA_LOG_WARN("TT: (%ld %ld %ld) '%s'\n", raw_text.length(), fragment.offset, fragment.length, raw_text.c_str());
 #endif
                         llama_escape_whitespace(raw_text);
-                        llm_tokenizer_spm_session session(*tokenizer);
+                        llm_tokenizer_spm_session session(vocab);
                         session.tokenize(raw_text, output);
                         is_prev_special = false;
                     } else { // if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_TOKEN)
@@ -1463,7 +1487,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
             } break;
         case LLAMA_VOCAB_TYPE_BPE:
             {
-                llm_tokenizer_bpe_session session(*tokenizer);
+                llm_tokenizer_bpe_session session(vocab);
                 // it calls some other methods that are not exist in llm_tokenizer,
                 // here just cast it to bpe tokenizer object
                 if (add_special) {
@@ -1494,7 +1518,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
                     output.push_back(vocab.special_cls_id);
                 }
 
-                llm_tokenizer_wpm_session session(*tokenizer);
+                llm_tokenizer_wpm_session session(vocab);
 
                 for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
@@ -1520,7 +1544,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
                     GGML_ASSERT(vocab.special_bos_id != -1);
                     output.push_back(vocab.special_bos_id);
                 }
-                llm_tokenizer_ugm_session session(*tokenizer);
+                llm_tokenizer_ugm_session session(vocab);
 
                 for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
@@ -1548,7 +1572,7 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
             } break;
         case LLAMA_VOCAB_TYPE_RWKV:
             {
-                llm_tokenizer_rwkv_session session(*tokenizer);
+                llm_tokenizer_rwkv_session session(vocab);
                 for (const auto & fragment : fragment_buffer) {
                     if (fragment.type == FRAGMENT_BUFFER_VARIANT_TYPE_RAW_TEXT) {
                         auto raw_text = fragment.raw_text.substr(fragment.offset, fragment.length);
@@ -1570,32 +1594,6 @@ std::vector<llama_vocab::id> llama_tokenize_internal(const llm_tokenizer * token
     return output;
 }
 
-llm_tokenizer * llama_create_tokenizer(const llama_vocab & vocab) {
-    llm_tokenizer * tokenizer;
-
-    switch (vocab.type) {
-        case LLAMA_VOCAB_TYPE_SPM:
-            tokenizer = new llm_tokenizer_spm(vocab);
-            break;
-        case LLAMA_VOCAB_TYPE_BPE:
-            tokenizer = new llm_tokenizer_bpe(vocab);
-            break;
-        case LLAMA_VOCAB_TYPE_WPM:
-            tokenizer = new llm_tokenizer_wpm(vocab);
-            break;
-        case LLAMA_VOCAB_TYPE_UGM:
-            tokenizer = new llm_tokenizer_ugm(vocab);
-            break;
-        case LLAMA_VOCAB_TYPE_RWKV:
-            tokenizer = new llm_tokenizer_rwkv(vocab);
-            break;
-        default:
-            GGML_ABORT("fatal error");
-    }
-
-    return tokenizer;
-}
-
 llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch) {
     GGML_ASSERT(llama_vocab_get_type(vocab) != LLAMA_VOCAB_TYPE_NONE);
     static const char * hex = "0123456789ABCDEF";
@@ -1700,14 +1698,14 @@ llama_token llama_token_eom_impl(const struct llama_vocab & vocab) {
 }
 
 int32_t llama_tokenize_impl(
-        const llm_tokenizer * tokenizer,
+        const struct llama_vocab & vocab,
         const char * text,
         int32_t text_len,
         llama_token * tokens,
         int32_t n_tokens_max,
         bool add_special,
         bool parse_special) {
-    auto res = llama_tokenize_internal(tokenizer, std::string(text, text_len), add_special, parse_special);
+    auto res = llama_tokenize_internal(vocab, std::string(text, text_len), add_special, parse_special);
     if (n_tokens_max < (int) res.size()) {
         // LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
         return -((int) res.size());
@@ -1831,6 +1829,8 @@ int32_t llama_detokenize_impl(
         int32_t text_len_max,
         bool remove_special,
         bool unparse_special) {
+    GGML_ASSERT(vocab.tokenizer && "Tokenizer not initialized. Call llama_vocab::init_tokenizer() first.");
+
     int32_t avail = text_len_max;
     int32_t total = 0;
 
@@ -7,6 +7,8 @@
 #include <unordered_map>
 #include <map>
 
+struct llm_tokenizer;
+
 struct llama_vocab {
     using id = llama_token;
     using token = std::string;
@@ -61,14 +63,14 @@ struct llama_vocab {
 
     std::vector<char> precompiled_charsmap;
 
+    llm_tokenizer * tokenizer = nullptr;
+
+    llama_vocab() = default;
+    ~llama_vocab();
+
     int find_bpe_rank(const std::string & token_left, const std::string & token_right) const;
-};
-
-struct llm_tokenizer {
-    llm_tokenizer(const llama_vocab & vocab) : vocab(vocab) {}
-    virtual ~llm_tokenizer() = default;
 
-    const llama_vocab & vocab;
+    void init_tokenizer();
 };
 
 //
@@ -78,13 +80,11 @@ struct llm_tokenizer {
 // TODO: rename to llama_tokenize_impl
 // TODO: This should probably be in llama.h
 std::vector<llama_vocab::id> llama_tokenize_internal(
-        const llm_tokenizer * tokenizer,
+        const llama_vocab & vocab,
         std::string raw_text,
         bool add_special,
         bool parse_special = false);
 
-llm_tokenizer * llama_create_tokenizer(const llama_vocab & vocab);
-
 // TODO: move the API below as member functions of llama_vocab
 llama_token llama_byte_to_token_impl(const llama_vocab & vocab, uint8_t ch);
 
@@ -115,13 +115,13 @@ llama_token llama_token_eot_impl (const struct llama_vocab & vocab);
 llama_token llama_token_eom_impl (const struct llama_vocab & vocab);
 
 int32_t llama_tokenize_impl(
-        const llm_tokenizer * tokenizer,
-        const char * text,
-        int32_t text_len,
-        llama_token * tokens,
-        int32_t n_tokens_max,
-        bool add_special,
-        bool parse_special);
+        const struct llama_vocab & vocab,
+        const char * text,
+        int32_t text_len,
+        llama_token * tokens,
+        int32_t n_tokens_max,
+        bool add_special,
+        bool parse_special);
 
 // does not write null-terminator to buf
 int32_t llama_token_to_piece_impl(
@@ -139,4 +139,4 @@ int32_t llama_detokenize_impl(
         char * text,
         int32_t text_len_max,
         bool remove_special,
-        bool unparse_special);
+        bool unparse_special);
@@ -2846,9 +2846,8 @@ struct llama_model {
 
     std::string name = "n/a";
 
-    llama_hparams   hparams = {};
-    llama_vocab     vocab;
-    llm_tokenizer * tokenizer;
+    llama_hparams hparams = {};
+    llama_vocab   vocab;
 
     struct ggml_tensor * tok_embd;
     struct ggml_tensor * type_embd;
@@ -2924,8 +2923,6 @@ struct llama_model {
         while (!lora_adapters.empty()) {
             llama_lora_adapter_free(*lora_adapters.begin());
         }
-
-        delete tokenizer;
     }
 };
 
@@ -6407,7 +6404,7 @@ static void llm_load_vocab(
     }
     GGML_ASSERT(vocab.id_to_token.size() == vocab.token_to_id.size());
 
-    model.tokenizer = llama_create_tokenizer(vocab);
+    vocab.init_tokenizer();
 
     // determine the newline token: LLaMA "<0x0A>" == 10 == '\n', Falcon 193 == '\n'
     if (vocab.type == LLAMA_VOCAB_TYPE_SPM) {
@@ -6458,11 +6455,11 @@ static void llm_load_vocab(
     } else if (vocab.type == LLAMA_VOCAB_TYPE_WPM) {
         vocab.linefeed_id = vocab.special_pad_id;
     } else if (vocab.type == LLAMA_VOCAB_TYPE_RWKV) {
-        const std::vector<int> ids = llama_tokenize_internal(model.tokenizer, "\n", false);
+        const std::vector<int> ids = llama_tokenize_internal(model.vocab, "\n", false);
         GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
         vocab.linefeed_id = ids[0];
     } else {
-        const std::vector<int> ids = llama_tokenize_internal(model.tokenizer, "\xC4\x8A", false); // U+010A
+        const std::vector<int> ids = llama_tokenize_internal(model.vocab, "\xC4\x8A", false); // U+010A
         GGML_ASSERT(!ids.empty() && "model vocab missing newline token");
         vocab.linefeed_id = ids[0];
     }
@@ -20890,7 +20887,7 @@ int32_t llama_tokenize(
         int32_t n_tokens_max,
         bool add_special,
         bool parse_special) {
-    return llama_tokenize_impl(model->tokenizer, text, text_len, tokens, n_tokens_max, add_special, parse_special);
+    return llama_tokenize_impl(model->vocab, text, text_len, tokens, n_tokens_max, add_special, parse_special);
 }
 
 int32_t llama_token_to_piece(
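The public API is unchanged: llama_tokenize() keeps its signature and now forwards the model's vocab instead of a tokenizer pointer. A hedged usage sketch of the call path after this commit, assembled from the hunks above (the buffer size and prompt are illustrative only):

    // llama_tokenize(model, ...) -> llama_tokenize_impl(model->vocab, ...)
    //                            -> llama_tokenize_internal(vocab, ...)
    const char * text = "Hello world";
    llama_token tokens[64];
    int32_t n = llama_tokenize(model, text, (int32_t) strlen(text),
                               tokens, 64,
                               /*add_special=*/true, /*parse_special=*/false);
    // per the hunk above, a negative result means the buffer was too small;
    // -n is the number of tokens actually needed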