convert-llama-h5-to-gguf.py : n_layer --> n_block
commit e91a2224e4
parent 489616e126
@@ -76,7 +76,7 @@ gguf_writer.add_name(last_dir)
 gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_layer_count(llm_arch, block_count)
+gguf_writer.add_block_count(llm_arch, block_count)
 gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
 gguf_writer.add_head_count(llm_arch, head_count)
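For context, a minimal, hypothetical sketch of how values such as block_count are typically derived from a Hugging Face config.json before being handed to the writer calls in this hunk. The num_hidden_layers and num_attention_heads keys are standard in LLaMA HF configs; the GGUFWriter construction shown is an assumption and may differ from the actual gguf-py API at this revision. Only the add_* calls mirror the diff above.

    # Hedged sketch, not the actual script: writer construction is assumed.
    import json

    import gguf  # gguf-py package from this repository

    with open("model/config.json") as f:  # hypothetical path
        hparams = json.load(f)

    llm_arch = "llama"
    block_count = hparams["num_hidden_layers"]   # n_layer in HF terms, n_block in GGUF terms
    head_count = hparams["num_attention_heads"]

    gguf_writer = gguf.GGUFWriter("model.gguf")  # assumed constructor signature
    gguf_writer.add_architecture(llm_arch)
    gguf_writer.add_block_count(llm_arch, block_count)  # renamed from add_layer_count
    gguf_writer.add_head_count(llm_arch, head_count)

The rename is purely terminological: the same per-layer repeat count is now written under GGUF's "block" naming, so the value passed (block_count) is unchanged.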