Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-14 06:49:54 +00:00)
Init llama_context_params properly from CLI (#370)

commit 928480ef5b
parent 56817b1f88
llama.cpp: 4 additions

@@ -1398,6 +1398,10 @@ struct llama_context * llama_init_from_file(
 
     llama_context * ctx = new llama_context;
 
+    if (params.seed <= 0) {
+        params.seed = time(NULL);
+    }
+
     ctx->rng = std::mt19937(params.seed);
     ctx->logits_all = params.logits_all;
 
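The effect of this hunk is that a non-positive seed passed to llama_init_from_file is replaced with the current time before the context's RNG is constructed, so each run gets a fresh random stream unless the user pins a seed. Below is a minimal, self-contained C++ sketch of that seeding logic; the Params struct is an illustrative stand-in, not the real llama_context_params.

// Minimal sketch of the seed-defaulting behaviour added in this hunk.
// `Params` is illustrative only; the real code uses llama_context_params.
#include <cstdio>
#include <ctime>
#include <random>

struct Params {
    int seed = -1;   // any value <= 0 means "pick a seed for me"
};

int main() {
    Params params;

    // Same check as the patch: a non-positive seed is replaced with the
    // current time before the RNG is constructed.
    if (params.seed <= 0) {
        params.seed = static_cast<int>(time(nullptr));
    }

    std::mt19937 rng(params.seed);   // the context's RNG, as in llama_init_from_file
    std::printf("seed = %d, first draw = %u\n", params.seed, rng());
    return 0;
}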
main.cpp: 3 additions

@@ -194,6 +194,9 @@ int main(int argc, char ** argv) {
     {
         auto lparams = llama_context_default_params();
 
+        lparams.n_ctx      = params.n_ctx;
+        lparams.n_parts    = params.n_parts;
+        lparams.seed       = params.seed;
         lparams.f16_kv     = params.memory_f16;
         lparams.logits_all = params.perplexity;
 
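After this hunk, main.cpp forwards n_ctx, n_parts, and seed from the parsed CLI options into the context parameters instead of leaving them at the library defaults; previously only f16_kv and logits_all were copied over. The sketch below shows the resulting hand-off pattern in isolation; CliParams and ContextParams are simplified stand-ins for the CLI-side params struct and llama_context_params, not the real types.

// Sketch of the parameter hand-off after this patch: every relevant CLI
// option is copied into the context params before the context is created.
// Both structs are simplified stand-ins, with field names taken from the diff.
#include <cstdio>

struct CliParams {                 // stand-in for the CLI-side params struct
    int  n_ctx      = 512;
    int  n_parts    = -1;
    int  seed       = -1;
    bool memory_f16 = false;
    bool perplexity = false;
};

struct ContextParams {             // stand-in for llama_context_params
    int  n_ctx      = 512;
    int  n_parts    = -1;
    int  seed       = 0;
    bool f16_kv     = false;
    bool logits_all = false;
};

int main() {
    CliParams     params;          // as parsed from argv in main.cpp
    ContextParams lparams;         // as returned by llama_context_default_params()

    // The three assignments added by the patch, plus the two that already existed:
    lparams.n_ctx      = params.n_ctx;
    lparams.n_parts    = params.n_parts;
    lparams.seed       = params.seed;
    lparams.f16_kv     = params.memory_f16;
    lparams.logits_all = params.perplexity;

    std::printf("n_ctx=%d n_parts=%d seed=%d\n", lparams.n_ctx, lparams.n_parts, lparams.seed);
    return 0;
}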