mirror of https://github.com/ggerganov/llama.cpp.git
llama.cpp : print kv general.name
commit 306070c896
parent d9e6890a51
--- a/llama.cpp
+++ b/llama.cpp
@@ -1329,6 +1329,8 @@ static void llama_model_load_internal(
 
     auto & hparams = model.hparams;
 
+    std::string general_name = "n/a";
+
     // read hparams
     {
         struct gguf_context * ctx = ml->ctx_gguf;
@@ -1347,6 +1349,10 @@ static void llama_model_load_internal(
         } \
     }
 
+        // get general kv
+        GGUF_GET(general_name, gguf_get_val_str, GGUF_TYPE_STRING, false, "general.name");
+
+        // get hparams kv
         GGUF_GET(hparams.n_vocab,     gguf_get_arr_n,   GGUF_TYPE_ARRAY,  true, "tokenizer.ggml.tokens");
         GGUF_GET(hparams.n_ctx_train, gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.context_length");
         GGUF_GET(hparams.n_embd,      gguf_get_val_u32, GGUF_TYPE_UINT32, true, "llama.embedding_length");
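
For context, GGUF_GET is a local helper macro defined just above this hunk. As a rough sketch of its behavior (an assumption for illustration, not the literal macro body), the optional general.name lookup amounts to something along these lines, built on the public gguf functions gguf_find_key, gguf_get_kv_type, and gguf_get_val_str, and operating on the ctx and general_name from the hunk:

    // Sketch of GGUF_GET(general_name, gguf_get_val_str, GGUF_TYPE_STRING,
    // false, "general.name") -- a simplified assumption, not the exact expansion.
    {
        const int kid = gguf_find_key(ctx, "general.name"); // -1 when the key is absent
        if (kid >= 0 && gguf_get_kv_type(ctx, kid) == GGUF_TYPE_STRING) {
            general_name = gguf_get_val_str(ctx, kid);      // copies into the std::string
        }
        // req == false: a missing key is not an error, so the "n/a"
        // default assigned earlier is kept.
    }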
@@ -1359,6 +1365,7 @@ static void llama_model_load_internal(
         // n_head_kv is optional, default to n_head
         hparams.n_head_kv = hparams.n_head;
         GGUF_GET(hparams.n_head_kv, gguf_get_val_u32, GGUF_TYPE_UINT32, false, "llama.attention.head_count_kv");
+
 #undef GGUF_GET
 
         switch (hparams.n_layer) {
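
Note the pattern in this hunk: assign the default first (n_head_kv = n_head), then let an optional key override it, so older GGUF files without llama.attention.head_count_kv load as plain multi-head attention while grouped-query models can shrink the KV head count. The same idea in isolation (a sketch; the standalone variable names are illustrative):

    // Default-then-optional-override sketch (illustrative names):
    uint32_t n_head_kv = n_head;                            // default: n_head_kv == n_head (MHA)
    const int kid = gguf_find_key(ctx, "llama.attention.head_count_kv");
    if (kid >= 0) {
        n_head_kv = gguf_get_val_u32(ctx, kid);             // GQA/MQA override when present
    }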
@@ -1422,6 +1429,8 @@ static void llama_model_load_internal(
     }
 
     {
+        LLAMA_LOG_INFO("%s: general.name = %s\n", __func__, general_name.c_str());
+
         LLAMA_LOG_INFO("%s: format      = %s\n", __func__, llama_file_version_name(ml->file_version));
         LLAMA_LOG_INFO("%s: n_vocab     = %u\n", __func__, hparams.n_vocab);
         LLAMA_LOG_INFO("%s: n_ctx_train = %u\n", __func__, hparams.n_ctx_train);
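
The net effect: the model load banner now leads with the model's self-reported name from the GGUF metadata. For a file that carries general.name, the first lines of output would look roughly like this (values are illustrative, not from a real run):

    llama_model_load_internal: general.name = LLaMA v2
    llama_model_load_internal: format       = GGUF V1 (latest)
    llama_model_load_internal: n_vocab      = 32000
    llama_model_load_internal: n_ctx_train  = 4096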