Use params when loading models in llava-cli (#3976)

llava-cli was loading models with the default params and ignoring settings passed on the CLI. This switches to the generic helpers that build the model and context params from the CLI options.
Matthew Tejo 2023-11-06 23:43:59 -08:00 committed by GitHub
parent 46876d2a2c
commit 54b4df8886

@@ -242,18 +242,16 @@ static struct llava_context * llava_init(gpt_params * params) {
     llama_backend_init(params->numa);
 
-    llama_model_params model_params = llama_model_default_params();
+    llama_model_params model_params = llama_model_params_from_gpt_params(*params);
 
     llama_model * model = llama_load_model_from_file(params->model.c_str(), model_params);
     if (model == NULL) {
         fprintf(stderr , "%s: error: unable to load model\n" , __func__);
         return NULL;
     }
 
-    llama_context_params ctx_params = llama_context_default_params();
+    llama_context_params ctx_params = llama_context_params_from_gpt_params(*params);
     ctx_params.n_ctx = params->n_ctx < 2048 ? 2048 : params->n_ctx; // we need a longer context size to process image embeddings
-    ctx_params.n_threads = params->n_threads;
-    ctx_params.n_threads_batch = params->n_threads_batch == -1 ? params->n_threads : params->n_threads_batch;
 
     llama_context * ctx_llama = llama_new_context_with_model(model, ctx_params);