Mirror of https://github.com/ggerganov/llama.cpp.git
fix missing parameters in llama_init_from_gpt_params (#1293)
parent 67c77799e0
commit bf4b22ffe4
@@ -414,6 +414,8 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.f16_kv     = params.memory_f16;
     lparams.use_mmap   = params.use_mmap;
     lparams.use_mlock  = params.use_mlock;
+    lparams.logits_all = params.perplexity;
+    lparams.embedding  = params.embedding;
 
     llama_context * lctx = llama_init_from_file(params.model.c_str(), lparams);
 
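For context, a minimal caller sketch (not part of this commit) of why the two missing assignments mattered: before the fix, the perplexity and embedding settings parsed into gpt_params were never copied into the llama_context_params used to create the context, so contexts created through this helper silently ignored them. The main() below and the use of gpt_params_parse are illustrative assumptions, not code from this diff.

// Hypothetical caller sketch (assumption, not from this commit): creates a
// context via llama_init_from_gpt_params. Prior to this fix, params.perplexity
// and params.embedding were parsed but dropped, because they were never
// copied into lparams.logits_all / lparams.embedding.
#include "common.h"   // gpt_params, gpt_params_parse, llama_init_from_gpt_params
#include "llama.h"    // llama_context, llama_free

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {
        return 1;
    }

    // With this commit, the two settings now propagate into the context:
    //   lparams.logits_all = params.perplexity;
    //   lparams.embedding  = params.embedding;
    llama_context * ctx = llama_init_from_gpt_params(params);
    if (ctx == NULL) {
        return 1;
    }

    // ... perplexity / embedding work would go here ...

    llama_free(ctx);
    return 0;
}

With the fix, examples that create their context through llama_init_from_gpt_params see the same perplexity/embedding behavior as code that fills in llama_context_params by hand, which appears to be the point of the commit title.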