From c71bfd736ee99a56e697697b39240f2ee06ed26d Mon Sep 17 00:00:00 2001
From: slaren <slarengh@gmail.com>
Date: Thu, 18 Apr 2024 09:04:47 +0200
Subject: [PATCH] llama : fix compatibility with old 2 expert models (#6735)

---
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/llama.cpp b/llama.cpp
index f4f4063cf..8c1446296 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4592,7 +4592,7 @@ static bool llm_load_tensors(
     size_t ctx_size = ggml_tensor_overhead()*(ml.n_tensors + 1); // +1 for models where tok_embd is duplicated as output

     // for moe merged tensors
-    ctx_size += ggml_tensor_overhead()*hparams.n_expert*n_layer;
+    ctx_size += ggml_tensor_overhead()*n_layer*3;

     std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
     for (auto & it : buft_layer_count) {