llama : fix qs.n_attention_wv for DeepSeek-V2 (#9156)

Author: compilade
Date: 2024-08-27 06:09:23 -04:00
Committed by: GitHub (GPG key ID B5690EEEBB952194; no known key found for this signature in database)
Parent: a77feb5d71
Commit: 78eb487bb0


@@ -16822,7 +16822,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos ||
-            name.find("attn_qkv.weight") != std::string::npos) {
+            name.find("attn_qkv.weight") != std::string::npos ||
+            name.find("attn_kv_b.weight") != std::string::npos) {
             ++qs.n_attention_wv;
         } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
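
For context: DeepSeek-V2 uses multi-head latent attention, so its value projection is stored inside a combined attn_kv_b.weight tensor and no standalone attn_v.weight exists. Before this patch, the per-layer counter qs.n_attention_wv therefore stayed at zero for DeepSeek-V2 models, breaking the quantizer's per-layer bookkeeping. Below is a minimal, self-contained sketch of the patched counting logic; the tensor names and the two-layer model are illustrative, not taken from the diff.

// Standalone sketch of the substring checks in the patched condition.
// Hypothetical input: tensor names of a two-layer DeepSeek-V2-style model.
#include <cstdio>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> names = {
        "blk.0.attn_kv_b.weight",   // combined K/V projection (no attn_v.weight)
        "blk.0.attn_output.weight",
        "blk.1.attn_kv_b.weight",
        "blk.1.attn_output.weight",
    };

    int n_attention_wv = 0;
    for (const std::string & name : names) {
        // Same three substring checks as the patched condition above.
        if (name.find("attn_v.weight")    != std::string::npos ||
            name.find("attn_qkv.weight")  != std::string::npos ||
            name.find("attn_kv_b.weight") != std::string::npos) {
            ++n_attention_wv;
        }
    }

    // Prints 2 with the attn_kv_b.weight check; without it, 0, so the
    // counter would no longer match the model's attention layer count.
    printf("n_attention_wv = %d\n", n_attention_wv);
    return 0;
}

As the TODO in the diff itself notes, matching on hardcoded name fragments is a stopgap; the intended cleanup is to derive these names from the TN_* constants instead.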