mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-24 10:24:35 +00:00
Fix crash for 65B model with pre-allocated memory (#485)
This commit is contained in:
parent
8520fc310e
commit
6f1ee4b640
@@ -239,7 +239,7 @@ static bool kv_cache_init(
     const int n_mem      = n_layer*n_ctx;
     const int n_elements = n_embd*n_mem;

-    cache.buf.resize(2*n_elements*ggml_type_size(wtype) + 2u*MB);
+    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);

     struct ggml_init_params params;
     params.mem_size   = cache.buf.size();
Loading…
Reference in New Issue
Block a user