Properly free llama_context on failure

commit afd220d9c6
parent 481044d50c
Author: Georgi Gerganov
Date:   2023-03-24 17:21:01 +02:00


@@ -1432,7 +1432,7 @@ struct llama_context * llama_init_from_file(
     if (!llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, type_memory,
                           params.vocab_only)) {
         fprintf(stderr, "%s: failed to load model\n", __func__);
-        delete ctx;
+        llama_free(ctx);
         return nullptr;
     }
 
@@ -1441,7 +1441,7 @@ struct llama_context * llama_init_from_file(
         if (!ggml_mlock(ctx->model.ctx, &err)) {
             fprintf(stderr, "%s\n", err);
             free(err);
-            delete ctx;
+            llama_free(ctx);
             return nullptr;
         }
     }
@@ -1464,7 +1464,9 @@
 }
 
 void llama_free(struct llama_context * ctx) {
-    ggml_free(ctx->model.ctx);
+    if (ctx->model.ctx) {
+        ggml_free(ctx->model.ctx);
+    }
 
     delete ctx;
 }
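
For reference, a minimal self-contained C++ sketch of the pattern this commit lands: every failure path in the factory goes through llama_free() instead of a bare delete, and llama_free() guards the inner ggml_free() so a partially constructed context (model.ctx still null) can be released safely. This is not the actual llama.cpp code: ggml_free() and the loader are stubs, the structs are trimmed to the fields the diff touches, and the _sketch/_stub names are invented for illustration.

#include <cstdio>

struct ggml_context { int unused; };

// Stub for ggml_free(); assumed here (as the null guard in the commit
// implies) to require a valid, non-null context.
static void ggml_free(ggml_context * ctx) { delete ctx; }

struct llama_model   { ggml_context * ctx = nullptr; };
struct llama_context { llama_model model; };

void llama_free(llama_context * ctx) {
    // model.ctx may still be null if initialization failed early,
    // so guard the inner free instead of calling ggml_free() unconditionally.
    if (ctx->model.ctx) {
        ggml_free(ctx->model.ctx);
    }
    delete ctx;
}

// Stub loader that fails before model.ctx is ever allocated,
// mimicking the "failed to load model" path in the diff above.
static bool llama_model_load_stub(const char * /*path*/, llama_context & /*ctx*/) {
    return false;
}

llama_context * llama_init_from_file_sketch(const char * path_model) {
    auto * ctx = new llama_context();
    if (!llama_model_load_stub(path_model, *ctx)) {
        std::fprintf(stderr, "%s: failed to load model\n", __func__);
        llama_free(ctx); // not `delete ctx`: also releases model.ctx once it exists
        return nullptr;
    }
    return ctx;
}

int main() {
    // Exercise the failure path: the context is freed, ggml_free(nullptr) never runs.
    return llama_init_from_file_sketch("model.bin") ? 0 : 1;
}

Centralizing cleanup this way also means any resource later added to llama_context has exactly one place where it must be released.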