mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-25 02:44:36 +00:00
Fix crash for 65B model with pre-allocated memory (#485)
This commit is contained in:
parent
8520fc310e
commit
6f1ee4b640
@@ -239,7 +239,7 @@ static bool kv_cache_init(
     const int n_mem      = n_layer*n_ctx;
     const int n_elements = n_embd*n_mem;

-    cache.buf.resize(2*n_elements*ggml_type_size(wtype) + 2u*MB);
+    cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);

     struct ggml_init_params params;
     params.mem_size = cache.buf.size();
Loading…
Reference in New Issue
Block a user