Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-26 03:14:35 +00:00
Fail test if model file is missing
This commit is contained in:
parent 4b1f70cb03
commit 32ebd525bf
@@ -1,8 +1,20 @@
 #include "llama.h"
 
+#include <cstdio>
 #include <cstdlib>
 
 int main(void) {
+    auto model_path = "models/7B/ggml-model-f16.gguf";
+    auto file = fopen(model_path, "r");
+
+    if (file == nullptr) {
+        fprintf(stderr, "no model at '%s' found\n", model_path);
+        return EXIT_FAILURE;
+    } else {
+        fprintf(stderr, "using '%s'\n", model_path);
+        fclose(file);
+    }
+
     llama_backend_init(false);
     auto params = llama_model_params{};
     params.use_mmap = false;
@@ -10,7 +22,7 @@ int main(void) {
         (void) ctx;
         return progress > 0.50;
     };
-    auto * model = llama_load_model_from_file("models/7B/ggml-model-f16.gguf", params);
+    auto * model = llama_load_model_from_file(model_path, params);
     llama_backend_free();
     return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
 }
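For context, here is a minimal sketch of the whole test as it would read after this commit. The line between the two hunks that assigns the progress callback is elided from the diff, so the params.progress_callback wiring below, and the bool(float progress, void * ctx) callback signature it assumes, is a reconstruction from the visible lambda body rather than verbatim commit content.

// Sketch of the complete test after this commit. The progress_callback
// assignment sits between the two hunks and is not shown in the diff,
// so its exact form here is an assumption based on the visible lines.
#include "llama.h"

#include <cstdio>
#include <cstdlib>

int main(void) {
    auto model_path = "models/7B/ggml-model-f16.gguf";
    auto file = fopen(model_path, "r");

    // New in this commit: fail the test outright when the model file
    // is missing, instead of exiting with a misleading success.
    if (file == nullptr) {
        fprintf(stderr, "no model at '%s' found\n", model_path);
        return EXIT_FAILURE;
    } else {
        fprintf(stderr, "using '%s'\n", model_path);
        fclose(file);
    }

    llama_backend_init(false);
    auto params = llama_model_params{};
    params.use_mmap = false;
    // Assumed wiring: a captureless lambda converts to the C callback
    // pointer; returning false from the callback aborts the load, so
    // this cancels loading partway through.
    params.progress_callback = [](float progress, void * ctx) {
        (void) ctx;
        return progress > 0.50;
    };
    auto * model = llama_load_model_from_file(model_path, params);
    llama_backend_free();
    // The test passes only when loading was cancelled, i.e. when no
    // model object was returned.
    return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
}

This also explains the commit title: without the fopen guard, a missing model file would likewise make llama_load_model_from_file return nullptr, so the cancellation test would pass vacuously; the new check turns that situation into an explicit failure.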