convert-llama-h5-to-gguf.py : n_layer --> n_block

klosax 2023-08-13 00:02:44 +02:00 committed by GitHub
parent 489616e126
commit e91a2224e4


@@ -76,7 +76,7 @@ gguf_writer.add_name(last_dir)
 gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
-gguf_writer.add_layer_count(llm_arch, block_count)
+gguf_writer.add_block_count(llm_arch, block_count)
 gguf_writer.add_feed_forward_length(llm_arch, hparams["intermediate_size"])
 gguf_writer.add_rope_dimension_count(llm_arch, hparams["hidden_size"] // hparams["num_attention_heads"])
 gguf_writer.add_head_count(llm_arch, head_count)
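
For context on the rename: the call site above presumes the writer gained a matching add_block_count() method. Below is a minimal sketch of what that writer-side change plausibly looks like, assuming a GGUFWriter-style class and a "{arch}.block_count" key name; both are assumptions for illustration, not code from the repository's gguf.py.

# sketch.py -- hypothetical, simplified GGUFWriter illustrating the rename
class GGUFWriter:
    def __init__(self) -> None:
        self.kv: dict[str, int] = {}  # metadata key-value store

    def add_uint32(self, key: str, value: int) -> None:
        self.kv[key] = value

    # Previously named add_layer_count(); renamed because the hparam
    # counts transformer blocks (n_block), not generic layers (n_layer).
    def add_block_count(self, llm_arch: str, length: int) -> None:
        self.add_uint32(f"{llm_arch}.block_count", length)

writer = GGUFWriter()
writer.add_block_count("llama", 32)  # e.g. hparams["num_hidden_layers"]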