Properly free llama_context on failure
parent 481044d50c
commit afd220d9c6

Changed file: llama.cpp
--- a/llama.cpp
+++ b/llama.cpp
@@ -1432,16 +1432,16 @@ struct llama_context * llama_init_from_file(
     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
                           params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
-        delete ctx;
+        llama_free(ctx);
         return nullptr;
     }
 
     if (params.use_mlock) {
         char *err;
         if (!ggml_mlock(ctx->model.ctx, &err)) {
             fprintf(stderr, "%s\n", err);
             free(err);
-            delete ctx;
+            llama_free(ctx);
             return nullptr;
         }
     }
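Both failure paths in llama_init_from_file now go through llama_free instead of a bare `delete ctx`, so everything the context has acquired up to that point is released in one place. Below is a minimal sketch of that single-cleanup-path pattern; the names (widget, backend_open, backend_close) are hypothetical stand-ins, not llama.cpp API:

    // Sketch of the pattern adopted above: every early return in the
    // init function funnels through the same free function callers use.
    // All names here are hypothetical, not llama.cpp API.
    #include <cstdio>

    struct backend { int dummy; };

    static backend * backend_open(bool fail) {
        return fail ? nullptr : new backend{};
    }

    static void backend_close(backend * b) {
        delete b;
    }

    struct widget {
        backend * be = nullptr;
    };

    void widget_free(widget * w);  // single cleanup path, declared up front

    widget * widget_init(bool fail_open) {
        widget * w = new widget();
        w->be = backend_open(fail_open);
        if (!w->be) {
            fprintf(stderr, "%s: failed to open backend\n", __func__);
            widget_free(w);  // not `delete w;` -- frees whatever was acquired
            return nullptr;
        }
        return w;
    }

    void widget_free(widget * w) {
        if (w->be) {          // must tolerate a partially initialized widget
            backend_close(w->be);
        }
        delete w;
    }

    int main() {
        widget * ok = widget_init(false);
        widget_free(ok);
        widget * bad = widget_init(true);  // prints an error, returns nullptr
        return bad == nullptr ? 0 : 1;
    }

When the init function later grows more setup steps, only the free function has to learn about the new resources; no early-return path can drift out of sync and leak.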
@@ -1464,7 +1464,9 @@ struct llama_context * llama_init_from_file(
 }
 
 void llama_free(struct llama_context * ctx) {
-    ggml_free(ctx->model.ctx);
+    if (ctx->model.ctx) {
+        ggml_free(ctx->model.ctx);
+    }
 
     delete ctx;
 }
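The second hunk makes llama_free tolerate a partially initialized context: if llama_model_load bailed out before ctx->model.ctx was created, the previously unguarded ggml_free would have received a null pointer. Together the two hunks give callers a simple contract: a nullptr return from llama_init_from_file means the context was already freed internally. A hedged caller-side sketch, assuming the llama.h entry points of this vintage (llama_context_default_params, llama_init_from_file, llama_free):

    // Usage sketch under the contract established by this commit.
    // The entry-point names are assumed from the llama.h of this era.
    #include "llama.h"
    #include <cstdio>

    int main(int argc, char ** argv) {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <model-path>\n", argv[0]);
            return 1;
        }

        llama_context_params params = llama_context_default_params();

        llama_context * ctx = llama_init_from_file(argv[1], params);
        if (ctx == nullptr) {
            // init already freed the half-built context; calling
            // llama_free here would be a double free
            fprintf(stderr, "failed to initialize context\n");
            return 1;
        }

        // ... run inference with ctx ...

        llama_free(ctx);  // same teardown path init uses on failure
        return 0;
    }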