convert-llama-h5-to-gguf.py : add tensor data layout

klosax 2023-08-15 19:54:33 +02:00 committed by GitHub
parent b6056c3db8
commit 66756c82af


@@ -97,6 +97,7 @@ gguf_writer.add_architecture(llm_arch)
 gguf_writer.add_name(last_dir)
 gguf_writer.add_file_type("All tensors F32" if ftype == 0 else "Most tensors F16, some F32")
 gguf_writer.add_source_hf_repo(hf_repo)
+gguf_writer.add_tensor_data_layout(llm_arch, "Meta AI original pth")
 gguf_writer.add_context_length(llm_arch, hparams["max_position_embeddings"])
 gguf_writer.add_embedding_length(llm_arch, hparams["hidden_size"])
 gguf_writer.add_block_count(llm_arch, block_count)
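
For reference, the added add_tensor_data_layout call records a free-form string in the GGUF metadata describing how the converted tensors are laid out; here it marks the data as following the original Meta AI pth ordering. Below is a minimal sketch of the idea, assuming the writer just collects architecture-scoped key/value strings before serializing the header. The class and key name are illustrative, not the actual gguf package implementation.

# Illustrative sketch only; not the actual gguf package implementation.
class GGUFWriterSketch:
    def __init__(self) -> None:
        # Metadata key/value pairs collected before serialization into the GGUF header.
        self.kv_data: dict[str, str] = {}

    def add_tensor_data_layout(self, arch: str, layout: str) -> None:
        # Store a human-readable tensor layout description, scoped to the architecture.
        self.kv_data[f"{arch}.tensor_data_layout"] = layout

writer = GGUFWriterSketch()
writer.add_tensor_data_layout("llama", "Meta AI original pth")
print(writer.kv_data)  # {'llama.tensor_data_layout': 'Meta AI original pth'}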