Mirror of https://github.com/ggerganov/llama.cpp.git
commit 9c5fd6be14
parent fc4c2a6fc3

    minor : spacing
```diff
@@ -219,27 +219,22 @@ static bool parse_kv_override(const char * data, std::vector<llama_model_kv_override> & overrides) {
         sep += 4;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
         kvo.int_value = std::atol(sep);
-    }
-    else if (strncmp(sep, "float:", 6) == 0) {
+    } else if (strncmp(sep, "float:", 6) == 0) {
         sep += 6;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
         kvo.float_value = std::atof(sep);
-    }
-    else if (strncmp(sep, "bool:", 5) == 0) {
+    } else if (strncmp(sep, "bool:", 5) == 0) {
         sep += 5;
         kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
         if (std::strcmp(sep, "true") == 0) {
             kvo.bool_value = true;
-        }
-        else if (std::strcmp(sep, "false") == 0) {
+        } else if (std::strcmp(sep, "false") == 0) {
             kvo.bool_value = false;
-        }
-        else {
+        } else {
             fprintf(stderr, "%s: invalid boolean value for KV override '%s'\n", __func__, data);
             return false;
         }
-    }
-    else {
+    } else {
        fprintf(stderr, "%s: invalid type for KV override '%s'\n", __func__, data);
        return false;
    }
```
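For context: the chain above parses override arguments of the form KEY=TYPE:VALUE, where TYPE is one of int, float, or bool. Below is a minimal, self-contained sketch of that parsing convention. The kv_override struct and parse_override function are simplified stand-ins for the real llama_model_kv_override and parse_kv_override, and the example key in main is purely illustrative.

```cpp
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct kv_override {                  // simplified stand-in for llama_model_kv_override
    enum { INT, FLOAT, BOOL } tag;
    long   int_value;
    double float_value;
    bool   bool_value;
    char   key[128];
};

static bool parse_override(const char * data, kv_override & kvo) {
    const char * sep = std::strchr(data, '=');
    if (sep == nullptr || sep - data >= (long) sizeof(kvo.key)) {
        return false;                 // no '=' separator, or key too long
    }
    std::memcpy(kvo.key, data, sep - data);
    kvo.key[sep - data] = 0;
    sep++;                            // skip '='
    if (std::strncmp(sep, "int:", 4) == 0) {
        kvo.tag = kv_override::INT;
        kvo.int_value = std::atol(sep + 4);
    } else if (std::strncmp(sep, "float:", 6) == 0) {
        kvo.tag = kv_override::FLOAT;
        kvo.float_value = std::atof(sep + 6);
    } else if (std::strncmp(sep, "bool:", 5) == 0) {
        sep += 5;
        kvo.tag = kv_override::BOOL;
        if (std::strcmp(sep, "true") == 0) {
            kvo.bool_value = true;
        } else if (std::strcmp(sep, "false") == 0) {
            kvo.bool_value = false;
        } else {
            return false;             // invalid boolean value
        }
    } else {
        return false;                 // unknown type prefix
    }
    return true;
}

int main() {
    kv_override kvo;
    if (parse_override("tokenizer.ggml.add_bos_token=bool:false", kvo)) {
        std::printf("key=%s bool=%d\n", kvo.key, (int) kvo.bool_value);
    }
    return 0;
}
```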
```diff
@@ -344,8 +339,7 @@ int main(int argc, char ** argv) {
         if (ftype_str == "COPY") {
             params.only_copy = true;
         }
-    }
-    else {
+    } else {
         fname_out = argv[arg_idx];
         arg_idx++;
 
```
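The hunk above belongs to the positional-argument handling in main: each trailing argument is first tried as a quantization type name, and only if that fails is it taken to be the output filename. A toy sketch of the same try-parse-then-fallback pattern, assuming a made-up type table (try_parse_ftype_sketch is not the real parser):

```cpp
#include <cstdio>
#include <initializer_list>
#include <string>

// toy stand-in for the real type-name parser
static bool try_parse_ftype_sketch(const std::string & arg, std::string & ftype_str) {
    for (const char * t : { "COPY", "F16", "Q8_0", "Q4_0" }) {
        if (arg == t) { ftype_str = t; return true; }
    }
    return false;
}

int main(int argc, char ** argv) {
    std::string fname_out;
    bool only_copy = false;
    for (int arg_idx = 1; arg_idx < argc; arg_idx++) {
        std::string ftype_str;
        if (try_parse_ftype_sketch(argv[arg_idx], ftype_str)) {
            if (ftype_str == "COPY") {
                only_copy = true;       // COPY: rewrite tensors without re-quantizing
            }
        } else {
            fname_out = argv[arg_idx];  // not a type name, so treat as output path
        }
    }
    std::printf("out='%s' only_copy=%d\n", fname_out.c_str(), (int) only_copy);
    return 0;
}
```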
llama.cpp (22 changes)
```diff
@@ -12809,20 +12809,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
     gguf_set_kv     (ctx_out, ml.meta);
     gguf_set_val_u32(ctx_out, "general.quantization_version", GGML_QNT_VERSION);
     gguf_set_val_u32(ctx_out, "general.file_type", ftype);
 
     if (params->kv_overrides) {
         const std::vector<llama_model_kv_override> & overrides = *(const std::vector<llama_model_kv_override> *)params->kv_overrides;
         for (auto & o : overrides) {
             if (o.key[0] == 0) break;
             if (o.tag == LLAMA_KV_OVERRIDE_TYPE_FLOAT) {
                 gguf_set_val_f32(ctx_out, o.key, o.float_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_INT) {
                 gguf_set_val_i32(ctx_out, o.key, o.int_value);
-            }
-            else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
+            } else if (o.tag == LLAMA_KV_OVERRIDE_TYPE_BOOL) {
                 gguf_set_val_bool(ctx_out, o.key, o.bool_value);
-            }
-            else {
+            } else {
                 LLAMA_LOG_WARN("%s: unknown KV override type for key %s\n", __func__, o.key);
             }
         }
     }
```
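Note that the loop above stops at the first entry whose key is empty (o.key[0] == 0), so the vector passed via params->kv_overrides is evidently expected to end with a zeroed sentinel entry. A sketch of building such a vector under that assumption; kv_override_sketch and its field layout are simplified stand-ins, with key assumed to be a fixed-size char array as the sentinel check implies:

```cpp
#include <cstdio>
#include <vector>

struct kv_override_sketch {            // simplified stand-in for llama_model_kv_override
    char key[128];
    int  tag;                          // stand-in for the LLAMA_KV_OVERRIDE_TYPE_* enum
    long int_value;
};

static std::vector<kv_override_sketch> make_overrides() {
    std::vector<kv_override_sketch> overrides;

    kv_override_sketch kvo = {};       // zero-initialize all fields
    std::snprintf(kvo.key, sizeof(kvo.key), "%s", "general.file_type");  // illustrative key
    kvo.tag       = 0;                 // pretend this is the INT tag
    kvo.int_value = 7;
    overrides.push_back(kvo);

    overrides.emplace_back();          // value-initialized sentinel: key[0] == 0 stops the reader
    return overrides;
}

int main() {
    std::vector<kv_override_sketch> overrides = make_overrides();
    std::printf("entries before sentinel: %zu\n", overrides.size() - 1);
    return 0;
}
```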
```diff
@@ -12836,17 +12834,13 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const std::string & fname_out, const llama_model_quantize_params * params) {
         // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
             ++qs.n_attention_wv;
-        }
-        else if (name.find("ffn_down") != std::string::npos) {
+        } else if (name.find("ffn_down") != std::string::npos) {
             ++qs.n_ffn_down;
-        }
-        else if (name.find("ffn_gate") != std::string::npos) {
+        } else if (name.find("ffn_gate") != std::string::npos) {
             ++qs.n_ffn_gate;
-        }
-        else if (name.find("ffn_up") != std::string::npos) {
+        } else if (name.find("ffn_up") != std::string::npos) {
             ++qs.n_ffn_up;
-        }
-        else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
+        } else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
             qs.has_output = true;
         }
     }
```
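The branches above classify tensors by substring match on their names; the resulting counters are consulted later when choosing per-tensor quantization types. A standalone sketch of the same matching, with illustrative tensor names:

```cpp
#include <cstdio>
#include <string>
#include <vector>

struct quant_counts {
    int n_attention_wv = 0;
    int n_ffn_down     = 0;
    int n_ffn_gate     = 0;
    int n_ffn_up       = 0;
};

static quant_counts count_tensors(const std::vector<std::string> & names) {
    quant_counts qs;
    for (const std::string & name : names) {
        if (name.find("attn_v.weight") != std::string::npos ||
            name.find("attn_qkv.weight") != std::string::npos) {
            ++qs.n_attention_wv;
        } else if (name.find("ffn_down") != std::string::npos) {
            ++qs.n_ffn_down;
        } else if (name.find("ffn_gate") != std::string::npos) {
            ++qs.n_ffn_gate;
        } else if (name.find("ffn_up") != std::string::npos) {
            ++qs.n_ffn_up;
        }
    }
    return qs;
}

int main() {
    quant_counts qs = count_tensors({
        "blk.0.attn_v.weight", "blk.0.ffn_down.weight", "blk.0.ffn_up.weight",
    });
    std::printf("wv=%d down=%d gate=%d up=%d\n",
                qs.n_attention_wv, qs.n_ffn_down, qs.n_ffn_gate, qs.n_ffn_up);
    return 0;
}
```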