mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-27 20:04:35 +00:00
llama : fix llama_model_loader memory leak
This commit is contained in:
parent
dd9e2fc988
commit
81a2c2a6f4
@@ -1083,6 +1083,15 @@ struct llama_model_loader {
         this->use_mmap = use_mmap;
     }

+    ~llama_model_loader() {
+        if (ctx_gguf) {
+            gguf_free(ctx_gguf);
+        }
+        if (ctx_meta) {
+            ggml_free(ctx_meta);
+        }
+    }
+
     const char * get_tensor_name(int i) const {
         return gguf_get_tensor_name(ctx_gguf, i);
     }
|
Loading…
Reference in New Issue
Block a user