#include "speculative.h"
|
|
|
|
#include "log.h"
|
|
#include "common.h"
|
|
#include "sampling.h"
|
|
|
|
#include <cstring>
|
|
|
|
#define SPEC_VOCAB_MAX_SIZE_DIFFERENCE 128
|
|
#define SPEC_VOCAB_CHECK_START_TOKEN_ID 5
|
|
|
|
struct common_speculative {
    struct llama_context * ctx;
    struct common_sampler * smpl;

    llama_batch  batch;
    llama_tokens prompt;
};
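
// Typical lifecycle (a sketch, not code from this file: `ctx_tgt`, `ctx_dft`, `params`,
// `prompt_tgt` and `id_last` are assumed to be set up by the caller):
//
//   if (common_speculative_are_compatible(ctx_tgt, ctx_dft)) {
//       struct common_speculative * spec = common_speculative_init(ctx_dft);
//
//       // on each decoding step: draft with the small model, then verify on the target model
//       llama_tokens draft = common_speculative_gen_draft(spec, params, prompt_tgt, id_last);
//       ...
//       common_speculative_free(spec);
//   }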

struct common_speculative * common_speculative_init(
        struct llama_context * ctx_dft) {
    auto * result = new common_speculative {
        /* .ctx    = */ ctx_dft,
        /* .smpl   = */ nullptr,
        /* .batch  = */ llama_batch_init(llama_n_batch(ctx_dft), 0, 1),
        /* .prompt = */ {},
    };

    // TODO: optimize or pass from outside?
#if 0
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 40;
        params.top_p = 0.9;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
            COMMON_SAMPLER_TYPE_TOP_P,
            COMMON_SAMPLER_TYPE_INFILL,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#else
    {
        common_params_sampling params;
        params.no_perf = false;

        params.top_k = 10;

        params.samplers = {
            COMMON_SAMPLER_TYPE_TOP_K,
        };

        result->smpl = common_sampler_init(llama_get_model(ctx_dft), params);
    }
#endif

    return result;
}

void common_speculative_free(struct common_speculative * spec) {
    if (spec == nullptr) {
        return;
    }

    common_sampler_free(spec->smpl);

    llama_batch_free(spec->batch);

    delete spec;
}

bool common_speculative_are_compatible(
        const struct llama_context * ctx_tgt,
        const struct llama_context * ctx_dft) {
    const struct llama_model * model_tgt = llama_get_model(ctx_tgt);
    const struct llama_model * model_dft = llama_get_model(ctx_dft);

    const enum llama_vocab_type vocab_type_tgt = llama_vocab_type(model_tgt);
    LOG_DBG("%s: vocab_type tgt: %d\n", __func__, vocab_type_tgt);

    const enum llama_vocab_type vocab_type_dft = llama_vocab_type(model_dft);
    LOG_DBG("%s: vocab_type dft: %d\n", __func__, vocab_type_dft);

    if (vocab_type_tgt != vocab_type_dft) {
        LOG_ERR("%s: draft model vocab type must match target model to use speculation but "
                "vocab_type_dft = %d while vocab_type_tgt = %d\n", __func__, vocab_type_dft, vocab_type_tgt);
        return false;
    }

    if (llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
        llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
        llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
        llama_token_eos(model_tgt) != llama_token_eos(model_dft)) {
        LOG_ERR("%s: draft model special tokens must match target model to use speculation\n", __func__);
        LOG_ERR("%s: tgt: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_tgt), llama_add_bos_token(model_tgt), llama_token_eos(model_tgt), llama_add_eos_token(model_tgt));
        LOG_ERR("%s: dft: bos = %d (%d), eos = %d (%d)\n", __func__, llama_token_bos(model_dft), llama_add_bos_token(model_dft), llama_token_eos(model_dft), llama_add_eos_token(model_dft));
        return false;
    }

    {
        const int n_vocab_tgt = llama_n_vocab(model_tgt);
        const int n_vocab_dft = llama_n_vocab(model_dft);

        const int vocab_diff = std::abs(n_vocab_tgt - n_vocab_dft);

        if (vocab_diff > SPEC_VOCAB_MAX_SIZE_DIFFERENCE) {
            LOG_ERR("%s: draft model vocab must closely match target model to use speculation but "
                    "target vocab size %d does not match draft vocab size %d - difference %d, max allowed %d\n",
                    __func__, n_vocab_tgt, n_vocab_dft, vocab_diff, SPEC_VOCAB_MAX_SIZE_DIFFERENCE);
            return false;
        }

        // the first few token ids are often special/control tokens whose text can legitimately
        // differ between models, so start the comparison at SPEC_VOCAB_CHECK_START_TOKEN_ID
        for (int i = SPEC_VOCAB_CHECK_START_TOKEN_ID; i < std::min(n_vocab_tgt, n_vocab_dft); ++i) {
            const char * token_text_tgt = llama_token_get_text(model_tgt, i);
            const char * token_text_dft = llama_token_get_text(model_dft, i);
            if (std::strcmp(token_text_tgt, token_text_dft) != 0) {
                LOG_ERR("%s: draft model vocab must match target model to use speculation but "
                        "token %d content differs - target '%s', draft '%s'\n", __func__, i,
                        common_token_to_piece(ctx_tgt, i).c_str(),
                        common_token_to_piece(ctx_dft, i).c_str());
                return false;
            }
        }
    }

    return true;
}

llama_tokens common_speculative_gen_draft(
        struct common_speculative * spec,
        struct common_speculative_params params,
        const llama_tokens & prompt_tgt,
        llama_token id_last) {
    auto & batch  = spec->batch;
    auto & ctx    = spec->ctx;
    auto & smpl   = spec->smpl;
    auto & prompt = spec->prompt;

    int reuse_i = 0;
    int reuse_n = 0;

    // leave room at the end of the draft context for the tokens that will be drafted
    const int n_ctx = llama_n_ctx(ctx) - params.n_draft;

    // start of the window of prompt_tgt that fits into the draft context
    const int i_start = std::max<int>(0, (int) prompt_tgt.size() - n_ctx);

    // reuse as much as possible from the old draft context
    // ideally, the draft context should be as big as the target context and we will always reuse the entire prompt
    for (int i = 0; i < (int) prompt.size(); ++i) {
        int cur = 0;
        while (i_start + cur < (int) prompt_tgt.size() &&
               i       + cur < (int) prompt.size() &&
               prompt_tgt[i_start + cur] == prompt[i + cur]) {
            cur++;
        }

        if ((cur >= params.n_reuse || n_ctx >= (int) prompt_tgt.size()) && cur > reuse_n) {
            reuse_i = i;
            reuse_n = cur;
        }
    }
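
    // worked example (hypothetical token ids, assuming i_start == 0 and params.n_reuse <= 3):
    //   previous draft prompt:   [10, 11, 12, 13]
    //   new target prompt:       [11, 12, 13, 14]
    // the longest common run starts at i = 1 in the old prompt and spans 3 tokens,
    // so reuse_i = 1, reuse_n = 3, and the KV cache entries for [11, 12, 13] can be kept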

    LOG_DBG("%s: reuse_i = %d, reuse_n = %d, prompt = %d\n", __func__, reuse_i, reuse_n, (int) prompt.size());

    llama_tokens result;
    result.reserve(params.n_draft);

    if (reuse_n == 0) {
        llama_kv_cache_clear(ctx);

        prompt.clear();
    } else {
        // this happens when a previous draft has been discarded (for example, due to being too small), but the
        // target model agreed with it. in this case, we simply pass back the previous results to save compute
        if (reuse_i + reuse_n < (int) prompt.size() && prompt[reuse_i + reuse_n] == id_last) {
            for (int i = reuse_i + reuse_n + 1; i < (int) prompt.size(); ++i) {
                result.push_back(prompt[i]);

                if (params.n_draft <= (int) result.size()) {
                    break;
                }
            }

            return result;
        }

        if (reuse_i > 0) {
            llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i);
            llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i);

            prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
        }

        if (reuse_n < (int) prompt.size()) {
            llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1);

            prompt.erase(prompt.begin() + reuse_n, prompt.end());
        }
    }
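
    // at this point the draft KV cache holds only the reused tokens: the seq_rm calls above drop
    // the cells outside [reuse_i, reuse_i + reuse_n) and seq_add shifts the kept cells left by
    // reuse_i, so the reused tokens now occupy positions [0, reuse_n)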

    // prepare a batch to evaluate any new tokens in the prompt
    common_batch_clear(batch);

    for (size_t i = i_start + reuse_n; i < prompt_tgt.size(); ++i) {
        //LOG_DBG("i = %d, i_start = %d, reuse_n = %d, i - i_start = %d, id = %6d\n", i, i_start, reuse_n, i - i_start, prompt_tgt[i]);
        common_batch_add(batch, prompt_tgt[i], i - i_start, { 0 }, false);

        prompt.push_back(prompt_tgt[i]);
    }

    // we should rarely end up here during normal decoding
    if (batch.n_tokens > 0) {
        //LOG_DBG("%s: draft prompt batch: %s\n", __func__, string_from(ctx, batch).c_str());

        llama_decode(ctx, batch);
    }

    const llama_pos n_past = prompt.size();

    LOG_DBG("%s: n_past = %d\n", __func__, n_past);

    common_batch_clear(batch);
    common_batch_add (batch, id_last, n_past, { 0 }, true);

    prompt.push_back(id_last);

    //LOG_DBG("%s: draft prompt: %s\n", __func__, string_from(ctx, prompt).c_str());

    llama_decode(ctx, batch);

    common_sampler_reset(smpl);

    // sample n_draft tokens from the draft model
    for (int i = 0; i < params.n_draft; ++i) {
        common_batch_clear(batch);

        common_sampler_sample(smpl, ctx, 0, true);

        const auto * cur_p = common_sampler_get_candidates(smpl);

        for (int k = 0; k < std::min(3, (int) cur_p->size); ++k) {
            LOG_DBG(" - draft candidate %3d, pos %3d: %6d (%8.3f) '%s'\n",
                    k, i, cur_p->data[k].id, cur_p->data[k].p, common_token_to_piece(ctx, cur_p->data[k].id).c_str());
        }

        // add drafted token for each sequence
        const llama_token id = cur_p->data[0].id;

        // only collect very high-confidence draft tokens
        if (cur_p->data[0].p < params.p_min) {
            break;
        }

        common_sampler_accept(smpl, id, true);

        result.push_back(id);

        if (params.n_draft <= (int) result.size()) {
            break;
        }

        common_batch_add(batch, id, n_past + i + 1, { 0 }, true);

        // evaluate the drafted tokens on the draft model
        llama_decode(ctx, batch);

        prompt.push_back(id);
    }

    return result;
}