speculative : fix batch sizes at initialization
ggml-ci
commit 47bb241cb1 (parent cda0e4b648)
@@ -190,8 +190,8 @@ int main(int argc, char ** argv) {
         drafts[s].smpl = common_sampler_init(model_dft, params.sparams);
     }
 
-    llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1);
-    llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, n_seq_dft);
+    llama_batch batch_dft = llama_batch_init(llama_n_ctx(ctx_dft), 0, 1);
+    llama_batch batch_tgt = llama_batch_init(llama_n_ctx(ctx_tgt), 0, n_seq_dft);
 
     const auto t_dec_start = ggml_time_us();
 
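For context, a minimal sketch of the pattern this commit adopts: each batch is sized from the actual context it will be decoded with, via llama_n_ctx(), instead of the user-requested params.n_ctx. The target and draft contexts may be created with different sizes, so sizing both batches from params.n_ctx can mismatch at least one of them. The helper below is hypothetical and simplified from the surrounding example; ctx_tgt, ctx_dft, and n_seq_dft are assumed to be set up earlier, as in speculative decoding examples.

#include "llama.h"

// Hypothetical helper (not part of the commit): allocate the draft and
// target batches sized to their own contexts, then free them.
static void run_with_batches(llama_context * ctx_tgt,   // target-model context (assumed created earlier)
                             llama_context * ctx_dft,   // draft-model context (assumed created earlier)
                             int            n_seq_dft) { // number of parallel draft sequences
    // draft batch: one sequence, sized to the draft context's actual n_ctx
    llama_batch batch_dft = llama_batch_init(llama_n_ctx(ctx_dft), 0, 1);

    // target batch: up to n_seq_dft sequences, sized to the target context's actual n_ctx
    llama_batch batch_tgt = llama_batch_init(llama_n_ctx(ctx_tgt), 0, n_seq_dft);

    // ... speculative decoding loop would go here ...

    llama_batch_free(batch_tgt);
    llama_batch_free(batch_dft);
}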