llama : fix comment for "output.weight" tensor

This commit is contained in:
Georgi Gerganov 2023-04-21 10:23:36 +03:00
parent 2510c1831f
commit d40fded93e
No known key found for this signature in database
GPG Key ID: 449E073F9DC10735

View File

@@ -1618,8 +1618,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
 // quantize only 2D tensors
 quantize &= (tensor.ne.size() == 2);
-// GG: uncomment this to keep the output layer in FP16
-//if (tensor.name.rfind("output")) {
+// uncomment this to keep the output layer in FP16
+//if (tensor.name == "output.weight") {
 // quantize = false;
 //}