#pragma once

#include "llama.h"
#include "common.h"

struct common_speculative;

struct common_speculative_params {
    int n_draft = 16;  // max number of tokens to draft in one call
    int n_min   = 5;   // do not add drafts smaller than this, TODO: leave this to user?
    int n_reuse = 256;

    float p_min = 0.9f; // min probability required to accept a token in the draft

    struct llama_model   * model_dft = nullptr; // draft model
    struct llama_context * ctx_dft   = nullptr; // context of the draft model
};

struct common_speculative * common_speculative_init(struct common_speculative_params params);

void common_speculative_free(struct common_speculative * spec);
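
// Example (a minimal sketch, not part of the API): typical lifecycle, assuming
// the draft model and its context were loaded elsewhere under the hypothetical
// names `model_dft` and `ctx_dft`:
//
//     common_speculative_params params;
//     params.model_dft = model_dft;
//     params.ctx_dft   = ctx_dft;
//
//     struct common_speculative * spec = common_speculative_init(params);
//     // ... drafting loop, see common_speculative_add_draft below ...
//     common_speculative_free(spec);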

// sample up to n_draft tokens with the draft model and append them to batch_tgt
// for verification by the target model
void common_speculative_add_draft(
        struct common_speculative * spec,
        struct llama_batch & batch_tgt,
        const llama_tokens & prompt,
        llama_token id_last,
        llama_pos n_past_tgt);
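
// Example (a minimal sketch under assumed caller state): one step of a
// speculative decoding loop. `ctx_tgt`, `prompt`, `id_last`, `n_past` and
// `params` are hypothetical caller-side names:
//
//     llama_batch batch_tgt = llama_batch_init(1 + params.n_draft, 0, 1);
//
//     common_batch_clear(batch_tgt);
//     common_batch_add  (batch_tgt, id_last, n_past, { 0 }, true);
//
//     // append up to n_draft tokens sampled from the draft model
//     common_speculative_add_draft(spec, batch_tgt, prompt, id_last, n_past + 1);
//
//     // one target decode scores id_last together with all drafted tokens; the
//     // caller then accepts the longest draft prefix matching the target samples
//     llama_decode(ctx_tgt, batch_tgt);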