https://github.com/ggerganov/llama.cpp.git
minor : spacing
commit 9c5fd6be14 (parent fc4c2a6fc3)
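
The changes below are whitespace-only, as the commit message says: a space before `&` and `*` in declarations and casts, and `else` moved onto the same line as the closing brace. A tiny stand-alone illustration of the target style (an illustrative sketch, not code from the commit; the function and file name are made up):

    #include <cstdio>
    #include <fstream>
    #include <string>

    // Illustrative only: the spacing conventions this commit applies.
    static void read_count(const std::string & path, int & n) {  // "T & x", not "T& x"
        std::ifstream in(path.c_str(), std::ios::binary);
        in.read((char *)&n, sizeof(n));                           // "(char *)", not "(char*)"
        if (in.fail()) {
            n = 0;
        } else {                                                  // "} else {" on one line
            printf("read n = %d from %s\n", n, path.c_str());
        }
    }

    int main() {
        int n = 0;
        read_count("counts.bin", n);
        return 0;
    }
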
@@ -111,14 +111,14 @@ static void usage(const char * executable) {
     exit(1);
 }
 
-static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
+static void load_imatrix(const std::string & imatrix_file, std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
     std::ifstream in(imatrix_file.c_str(), std::ios::binary);
     if (!in) {
-        printf("%s: failed to open %s\n",__func__,imatrix_file.c_str());
+        printf("%s: failed to open %s\n",__func__, imatrix_file.c_str());
         return;
     }
     int n_entries;
-    in.read((char*)&n_entries, sizeof(n_entries));
+    in.read((char *)&n_entries, sizeof(n_entries));
     if (in.fail() || n_entries < 1) {
         printf("%s: no data in file %s\n", __func__, imatrix_file.c_str());
         return;
@@ -128,25 +128,25 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
         std::vector<char> name_as_vec(len+1);
         in.read((char *)name_as_vec.data(), len);
         if (in.fail()) {
-            printf("%s: failed reading name for entry %d from %s\n",__func__,i+1,imatrix_file.c_str());
+            printf("%s: failed reading name for entry %d from %s\n", __func__, i+1, imatrix_file.c_str());
             return;
         }
         name_as_vec[len] = 0;
         std::string name{name_as_vec.data()};
-        auto& e = imatrix_data[std::move(name)];
+        auto & e = imatrix_data[std::move(name)];
         int ncall;
-        in.read((char*)&ncall, sizeof(ncall));
+        in.read((char *)&ncall, sizeof(ncall));
         int nval;
         in.read((char *)&nval, sizeof(nval));
         if (in.fail() || nval < 1) {
-            printf("%s: failed reading number of values for entry %d\n",__func__,i);
+            printf("%s: failed reading number of values for entry %d\n", __func__, i);
             imatrix_data = {};
             return;
         }
         e.resize(nval);
-        in.read((char*)e.data(), nval*sizeof(float));
+        in.read((char *)e.data(), nval*sizeof(float));
         if (in.fail()) {
-            printf("%s: failed reading data for entry %d\n",__func__,i);
+            printf("%s: failed reading data for entry %d\n", __func__, i);
             imatrix_data = {};
             return;
         }
@@ -154,13 +154,13 @@ static void load_imatrix(const std::string& imatrix_file, std::unordered_map<std
             for (auto& v : e) v /= ncall;
         }
     }
-    printf("%s: loaded %d importance matrix entries from %s\n",__func__,int(imatrix_data.size()),imatrix_file.c_str());
+    printf("%s: loaded %d importance matrix entries from %s\n", __func__, int(imatrix_data.size()), imatrix_file.c_str());
 }
 
-static void prepare_imatrix(const std::string& imatrix_file,
-        const std::vector<std::string>& included_weights,
-        const std::vector<std::string>& excluded_weights,
-        std::unordered_map<std::string, std::vector<float>>& imatrix_data) {
+static void prepare_imatrix(const std::string & imatrix_file,
+        const std::vector<std::string> & included_weights,
+        const std::vector<std::string> & excluded_weights,
+        std::unordered_map<std::string, std::vector<float>> & imatrix_data) {
     if (!imatrix_file.empty()) {
         load_imatrix(imatrix_file, imatrix_data);
     }
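
The reads in load_imatrix above imply a simple binary layout: an int entry count, then per entry a length-prefixed name, an int call count, an int value count, and that many floats; each value is divided by the call count after reading. A minimal writer for that layout, as a sketch (hypothetical helper name, single shared ncall for simplicity):

    #include <fstream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Hypothetical helper: writes the layout that load_imatrix() above reads back.
    static void save_imatrix_sketch(const std::string & fname,
            const std::unordered_map<std::string, std::vector<float>> & data, int ncall) {
        std::ofstream out(fname.c_str(), std::ios::binary);
        int n_entries = (int)data.size();
        out.write((const char *)&n_entries, sizeof(n_entries));              // int: number of entries
        for (const auto & kv : data) {
            int len = (int)kv.first.size();
            out.write((const char *)&len, sizeof(len));                      // int: name length
            out.write(kv.first.data(), len);                                 // name bytes, no terminator
            out.write((const char *)&ncall, sizeof(ncall));                  // int: call count
            int nval = (int)kv.second.size();
            out.write((const char *)&nval, sizeof(nval));                    // int: number of values
            out.write((const char *)kv.second.data(), nval * sizeof(float)); // nval floats (sums, not averages)
        }
    }

    int main() {
        std::unordered_map<std::string, std::vector<float>> data;
        data["blk.0.ffn_down.weight"] = {1.0f, 2.0f, 3.0f};
        save_imatrix_sketch("imatrix.dat", data, /*ncall=*/1);
        return 0;
    }
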
@@ -205,7 +205,7 @@ static ggml_type parse_ggml_type(const char * arg) {
     return result;
 }
 
-static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override>& overrides) {
+static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
     const char* sep = strchr(data, '=');
     if (sep == nullptr || sep - data >= 128) {
         fprintf(stderr, "%s: malformed KV override '%s'\n", __func__, data);
@@ -219,27 +219,22 @@ static bool parse_kv_override(const char * data, std::vector<llama_model_kv_over
         sep += 4;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
         kvo.int_value = std::atol(sep);
-    }
-    else if (strncmp(sep, "float:", 6) == 0) {
+    } else if (strncmp(sep, "float:", 6) == 0) {
         sep += 6;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
         kvo.float_value = std::atof(sep);
-    }
-    else if (strncmp(sep, "bool:", 5) == 0) {
+    } else if (strncmp(sep, "bool:", 5) == 0) {
         sep += 5;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
         if (std::strcmp(sep, "true") == 0) {
             kvo.bool_value = true;
-        }
-        else if (std::strcmp(sep, "false") == 0) {
+        } else if (std::strcmp(sep, "false") == 0) {
             kvo.bool_value = false;
-        }
-        else {
+        } else {
             fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
             return false;
         }
-    }
-    else {
+    } else {
         fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
         return false;
     }
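
The chain of strncmp branches above defines the accepted syntax: KEY=TYPE:VALUE, where TYPE is int, float, or bool, boolean values must be exactly true or false, and keys of 128 characters or more are rejected by the sep - data >= 128 check. A few strings this parser would accept (the keys here are hypothetical):

    const char * kv_override_examples[] = {
        "some.int.key=int:2",        // LLAMA_KV_OVERRIDE_TYPE_INT,   int_value   = 2
        "some.float.key=float:0.5",  // LLAMA_KV_OVERRIDE_TYPE_FLOAT, float_value = 0.5
        "some.bool.key=bool:true",   // LLAMA_KV_OVERRIDE_TYPE_BOOL,  bool_value  = true
    };
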
@@ -344,8 +339,7 @@ int main(int argc, char ** argv) {
         if (ftype_str == "COPY") {
             params.only_copy = true;
         }
-    }
-    else {
+    } else {
         fname_out = argv[arg_idx];
         arg_idx++;
 
llama.cpp (28 changes)
@@ -12809,20 +12809,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     gguf_set_kv     (ctx_out, ml.meta);
     gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
     gguf_set_val_u32(ctx_out, "general.file_type", ftype);
 
     if (params->kv_overrides) {
-        const std::vector<llama_model_kv_override>& overrides = *(const std::vector<llama_model_kv_override>*)params->kv_overrides;
-        for (auto& o : overrides) {
+        const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
+        for (auto & o : overrides) {
             if (o.key[0] == 0) break;
             if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
                 gguf_set_val_f32(ctx_out, o.key, o.float_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                 gguf_set_val_i32(ctx_out, o.key, o.int_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                 gguf_set_val_bool(ctx_out, o.key, o.bool_value);
-            }
-            else {
+            } else {
                 LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
             }
         }
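
This hunk is the consuming side: params->kv_overrides is interpreted as a pointer to a std::vector<llama_model_kv_override>, and each entry is written into the output GGUF with the matching gguf_set_val_* call, stopping at the first entry whose key is empty. A sketch of the producing side, assuming the caller fills the vector the way the quantize tool above does, and assuming key is a fixed-size char array as the 128-character check above suggests (key name and file names are illustrative; error handling omitted):

    #include <cstring>
    #include <vector>
    #include "llama.h"

    int main() {
        std::vector<llama_model_kv_override> kv_overrides;

        llama_model_kv_override kvo;
        std::strncpy(kvo.key, "some.int.key", sizeof(kvo.key) - 1);  // illustrative key
        kvo.key[sizeof(kvo.key) - 1] = 0;
        kvo.tag       = LLAMA_KV_OVERRIDE_TYPE_INT;
        kvo.int_value = 2;
        kv_overrides.push_back(kvo);

        kv_overrides.emplace_back();
        kv_overrides.back().key[0] = 0;  // empty key terminates the list (see the break above)

        llama_model_quantize_params params = llama_model_quantize_default_params();
        params.kv_overrides = &kv_overrides;  // consumed via the cast shown above

        // llama_model_quantize("input.gguf", "output.gguf", &params);  // file names are placeholders
        return 0;
    }
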
@@ -12836,21 +12834,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
-        }
-        else if (name.find("ffn_down") != std::string::npos) {
+        } else if (name.find("ffn_down") != std::string::npos) {
             ++qs.n_ffn_down;
-        }
-        else if (name.find("ffn_gate") != std::string::npos) {
+        } else if (name.find("ffn_gate") != std::string::npos) {
             ++qs.n_ffn_gate;
-        }
-        else if (name.find("ffn_up") != std::string::npos) {
+        } else if (name.find("ffn_up") != std::string::npos) {
             ++qs.n_ffn_up;
-        }
-        else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
+        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
         }
     }
-    if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t)qs.n_attention_wv != model.hparams.n_layer) {
+    if (qs.n_attention_wv != qs.n_ffn_down || (uint32_t) qs.n_attention_wv != model.hparams.n_layer) {
         LLAMA_LOG_WARN("%s ============ Strange model: n_attention_wv = %d, n_ffn_down = %d, hparams.n_layer = %d\n",
                 __func__, qs.n_attention_wv, qs.n_ffn_down, model.hparams.n_layer);
     }