gptneox-main.cpp : add tensor data layout
commit 4a1741aa2d
parent 2ae0e985b3
@@ -381,6 +381,8 @@ bool gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt2
         if (keyidx != -1) { fprintf(stdout, "%s: model architecture = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
         keyidx = gguf_find_key(ggufctx, "general.file_type");
         if (keyidx != -1) { fprintf(stdout, "%s: model file type = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
+        keyidx = gguf_find_key(ggufctx, "gptneox.tensor_data_layout");
+        if (keyidx != -1) { fprintf(stdout, "%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
         keyidx = gguf_find_key(ggufctx, "general.source.hugginface.repository");
         if (keyidx != -1) { fprintf(stdout, "%s: model source HF repo = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx)); }
     }
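The two added lines follow the same pattern as the surrounding code: look up an optional GGUF metadata key and print its string value only if the key is present. Below is a minimal standalone sketch of that pattern, not part of this commit, assuming the gguf API exposed through ggml.h at the time (gguf_init_from_file, gguf_find_key, gguf_get_val_str, gguf_free); the model file name is a placeholder.

// sketch only: read an optional metadata key from a GGUF file
#include "ggml.h"

#include <cstdio>

int main(int argc, char ** argv) {
    const char * fname = argc > 1 ? argv[1] : "model.gguf"; // hypothetical path

    // metadata only: no_alloc = true skips allocating tensor data
    struct gguf_init_params params = { /*.no_alloc =*/ true, /*.ctx =*/ NULL };
    struct gguf_context * ggufctx = gguf_init_from_file(fname, params);
    if (!ggufctx) {
        fprintf(stderr, "%s: failed to load '%s'\n", __func__, fname);
        return 1;
    }

    // gguf_find_key() returns -1 when the key is absent, so optional metadata
    // such as "gptneox.tensor_data_layout" can be skipped without an error
    int keyidx = gguf_find_key(ggufctx, "gptneox.tensor_data_layout");
    if (keyidx != -1) {
        fprintf(stdout, "%s: model data layout = %s\n", __func__, gguf_get_val_str(ggufctx, keyidx));
    }

    gguf_free(ggufctx);
    return 0;
}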