mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix mlock with no-mmap with Metal (#5025)
commit 96d7f56d29
parent 2d5419d08a
@@ -1599,7 +1599,7 @@ struct llama_model {
     std::unique_ptr<llama_mmap> mapping;
 
     // objects representing data potentially being locked in memory
-    llama_mlock mlock_buf;
+    std::vector<std::unique_ptr<llama_mlock>> mlock_bufs;
     llama_mlock mlock_mmap;
 
     // for quantize-stats only
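The hunk above replaces the single mlock_buf member with a vector of per-buffer locks. For context, a minimal sketch of why that matters; region_lock is a hypothetical stand-in for llama_mlock (the real implementation also handles page alignment and non-POSIX platforms), not the project's actual code. Each lock object tracks exactly one base address, so pinning several independently allocated host buffers requires one object each; the diff suggests that re-initializing the old shared lock for a second buffer would drop track of the first region.

// Hypothetical stand-in for llama_mlock: pins one contiguous host region.
// Assumes POSIX mlock(2)/munlock(2); real code also rounds to page size.
#include <cstddef>
#include <memory>
#include <vector>
#include <sys/mman.h>

struct region_lock {
    void * base = nullptr;
    size_t size = 0;

    void init(void * addr) { base = addr; }

    // extend the pinned range to cover [base, base + target)
    void grow_to(size_t target) {
        if (target > size && mlock((char *) base + size, target - size) == 0) {
            size = target;
        }
    }

    ~region_lock() {
        if (base != nullptr && size > 0) { munlock(base, size); }
    }
};

// one lock object per host buffer -- the reason for the vector above
std::vector<std::unique_ptr<region_lock>> mlock_bufs;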
@@ -3815,8 +3815,10 @@ static bool llm_load_tensors(
             else {
                 buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
                 if (buf != nullptr && use_mlock && ggml_backend_buffer_is_host(buf)) {
-                    model.mlock_buf.init   (ggml_backend_buffer_get_base(buf));
-                    model.mlock_buf.grow_to(ggml_backend_buffer_get_size(buf));
+                    model.mlock_bufs.emplace_back(new llama_mlock);
+                    auto & mlock_buf = model.mlock_bufs.back();
+                    mlock_buf->init   (ggml_backend_buffer_get_base(buf));
+                    mlock_buf->grow_to(ggml_backend_buffer_get_size(buf));
                 }
             }
             if (buf == nullptr) {
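A usage sketch of the new per-buffer flow, reusing the hypothetical region_lock above; plain malloc'd buffers stand in for the buffers returned by ggml_backend_alloc_ctx_tensors_from_buft(), and the sizes are made up:

#include <cstdio>
#include <cstdlib>

int main() {
    std::vector<std::unique_ptr<region_lock>> locks;

    // stand-ins for the host buffers allocated per backend during load
    for (size_t sz : { (size_t) 1 << 20, (size_t) 4 << 20 }) {
        void * buf = std::malloc(sz);
        if (buf != nullptr) {
            locks.emplace_back(new region_lock);  // same pattern as the diff
            auto & lk = locks.back();
            lk->init   (buf);
            lk->grow_to(sz);
            std::printf("pinned %zu bytes at %p\n", sz, buf);
        }
    }
    return 0;  // each destructor munlocks its own region
}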