llava-cli: fix base64 prompt (#7248)
parent 1c570d8bee
commit 30e70334f7
@@ -300,6 +300,19 @@ int main(int argc, char ** argv) {
         return 1;
     }

+    if (prompt_contains_image(params.prompt)) {
+        auto ctx_llava = llava_init_context(&params, model);
+
+        auto image_embed = load_image(ctx_llava, &params, "");
+
+        // process the prompt
+        process_prompt(ctx_llava, image_embed, &params, params.prompt);
+
+        llama_print_timings(ctx_llava->ctx_llama);
+        llava_image_embed_free(image_embed);
+        ctx_llava->model = NULL;
+        llava_free(ctx_llava);
+    } else {
         for (auto & image : params.image) {
             auto ctx_llava = llava_init_context(&params, model);

@@ -317,6 +330,8 @@ int main(int argc, char ** argv) {
             ctx_llava->model = NULL;
             llava_free(ctx_llava);
         }
+    }
+
     llama_free_model(model);

     return 0;
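For context, the new prompt_contains_image(params.prompt) branch handles prompts that carry the image inline as base64 instead of as a file passed via --image, calling load_image with an empty filename so the embedding can be built from the prompt itself. Below is a minimal, self-contained sketch of that detection, assuming the <img src="data:image/jpeg;base64,..."> tag convention used by llava-cli; the tag constants and the remove_image_from_prompt helper are illustrative stand-ins, not code taken from this commit.

#include <cstdio>
#include <string>

// Assumed tag convention for an inline base64 image in the prompt;
// the real constants live in llava-cli.cpp, these are illustrative copies.
static const char * IMG_TAG_BEGIN = "<img src=\"data:image/jpeg;base64,";
static const char * IMG_TAG_END   = "\">";

// true when the prompt carries an inline base64 image
static bool prompt_contains_image(const std::string & prompt) {
    return prompt.find(IMG_TAG_BEGIN) != std::string::npos;
}

// strip the <img ...> tag so only the text part of the prompt remains
// (hypothetical helper, named here for illustration)
static std::string remove_image_from_prompt(const std::string & prompt) {
    const size_t begin = prompt.find(IMG_TAG_BEGIN);
    if (begin == std::string::npos) {
        return prompt;
    }
    const size_t end = prompt.find(IMG_TAG_END, begin);
    if (end == std::string::npos) {
        return prompt;
    }
    return prompt.substr(0, begin) + prompt.substr(end + std::string(IMG_TAG_END).size());
}

int main() {
    const std::string prompt =
        "<img src=\"data:image/jpeg;base64,AAAA\">\nDescribe the image.";
    std::printf("contains image: %s\n", prompt_contains_image(prompt) ? "yes" : "no");
    std::printf("text-only prompt: %s\n", remove_image_from_prompt(prompt).c_str());
    return 0;
}

With the added branch, such a prompt is processed once through load_image(ctx_llava, &params, ""), which is expected to build the embedding from the base64 payload rather than from the --image files handled by the else branch.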