Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2025-01-11 19:21:46 +00:00
llama : fix non-quantization of expert gating tensors (#5754)
This reverts a single line from #5475
commit adcb12a9ba
parent 177628bfd8
@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
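The NOTE in the added lines is the crux of the fix: expert gating tensors are stored per layer (names of the form blk.<N>.ffn_gate_inp.weight), so a comparison against a name built without a layer index never matches and the gating tensors were being quantized despite the check. The substring test catches every layer. Below is a minimal standalone sketch of the two checks; the tensor names and the no_layer_name placeholder are illustrative assumptions, not llama.cpp's actual LLM_TN output.

// Standalone sketch (not part of llama.cpp): contrasts the exact-name check from
// #5475 with the substring check this commit restores. The tensor names below are
// illustrative examples of the per-layer naming scheme ("blk.<N>.ffn_gate_inp.weight").
#include <cstdio>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> names = {
        "blk.0.ffn_gate_inp.weight",   // MoE expert gating tensor, layer 0
        "blk.1.ffn_gate_inp.weight",   // MoE expert gating tensor, layer 1
        "blk.0.attn_q.weight",         // ordinary weight, should still be quantized
    };

    // Stand-in for a name built without a layer index: it can never equal a
    // real per-layer tensor name, so the != test filters out nothing.
    const std::string no_layer_name = "ffn_gate_inp.weight";

    for (const auto & name : names) {
        bool quantize_old = (name != no_layer_name);                                  // exact-name check (#5475)
        bool quantize_new = (name.find("ffn_gate_inp.weight") == std::string::npos);  // restored substring check

        std::printf("%-28s old check quantizes: %-3s restored check quantizes: %s\n",
                    name.c_str(),
                    quantize_old ? "yes" : "no",
                    quantize_new ? "yes" : "no");
    }
    return 0;
}

Running the sketch shows the old check quantizing all three tensors, while the restored check leaves only the two ffn_gate_inp tensors unquantized, which is the intended behavior.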