#!/usr/bin/env python3
import sys
from pathlib import Path

import numpy as np

# Necessary to load the local gguf package
sys.path.insert(0, str(Path(__file__).parent.parent))

from gguf import GGUFWriter  # noqa: E402


# Example usage:
def writer_example() -> None:
    # Example usage with a file
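    # GGUFWriter takes the output path and the architecture name; the
    # architecture string ("llama" here) prefixes model-specific metadata keys.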
    gguf_writer = GGUFWriter("example.gguf", "llama")
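
    # Each add_* call records a typed key-value metadata pair:
    # add_architecture() writes general.architecture and add_block_count()
    # writes the architecture-prefixed block count, while add_uint32() and
    # add_float32() store arbitrary user-defined keys.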
    gguf_writer.add_architecture()
    gguf_writer.add_block_count(12)
    gguf_writer.add_uint32("answer", 42)  # Write a 32-bit integer
    gguf_writer.add_float32("answer_in_float", 42.0)  # Write a 32-bit float
    gguf_writer.add_custom_alignment(64)
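
    # Tensors are plain numpy arrays; add_tensor() records the name, shape
    # and dtype, and the actual data is written out later by
    # write_tensors_to_file().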
    tensor1 = np.ones((32,), dtype=np.float32) * 100.0
    tensor2 = np.ones((64,), dtype=np.float32) * 101.0
    tensor3 = np.ones((96,), dtype=np.float32) * 102.0

    gguf_writer.add_tensor("tensor1", tensor1)
    gguf_writer.add_tensor("tensor2", tensor2)
    gguf_writer.add_tensor("tensor3", tensor3)
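
    # The write_* calls must run in this order: the header first, then the
    # key-value metadata, then the aligned tensor data.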
    gguf_writer.write_header_to_file()
    gguf_writer.write_kv_data_to_file()
    gguf_writer.write_tensors_to_file()

    gguf_writer.close()
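
# A minimal read-back sketch for verifying the output, assuming the
# GGUFReader class that this gguf package also ships (the attribute names
# fields, tensors, name and shape are assumptions, not confirmed here).
# Call reader_example() after writer_example() to inspect example.gguf.
def reader_example() -> None:
    from gguf import GGUFReader  # noqa: E402

    reader = GGUFReader("example.gguf")
    # Print every metadata key that was written.
    for key in reader.fields:
        print(f"field: {key}")
    # Print each tensor's name and shape.
    for tensor in reader.tensors:
        print(f"tensor: {tensor.name}, shape: {tensor.shape}")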

if __name__ == '__main__':
    writer_example()