mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)

commit c6ac198424: Merge branch 'master' into openelm
.gitignore (vendored): 11 changed lines (+6, -5)

@@ -98,13 +98,14 @@ examples/server/*.mjs.hpp
 
 # Python
 
-__pycache__
-.venv
-/Pipfile
-dist
-poetry.lock
+/.venv
+__pycache__/
+*/poetry.lock
 poetry.toml
 
+# Nix
+/result
+
 # Test binaries
 /tests/test-backend-ops
 /tests/test-double-float
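Note that with this change the root poetry.lock becomes trackable (it is committed in this very change), while nested lock files such as gguf-py/poetry.lock stay ignored via */poetry.lock. The new patterns can be sanity-checked with git check-ignore; a minimal sketch, where the paths are made-up examples:

    # -v prints which .gitignore pattern matched each path
    git check-ignore -v .venv              # should match the new /.venv rule
    git check-ignore -v result             # should match the new /result rule
    git check-ignore -v gguf-py/poetry.lock  # should match */poetry.lock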
CMakeLists.txt

@@ -156,7 +156,7 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
     DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama)
 
 install(
-    FILES convert-hf-to-gguf.py
+    FILES convert_hf_to_gguf.py
     PERMISSIONS
         OWNER_READ
         OWNER_WRITE
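For context, the renamed script is shipped through the normal CMake install flow; a minimal sketch of exercising that rule, assuming a throwaway prefix (the prefix path is arbitrary):

    # Configure, build, and install; the renamed convert_hf_to_gguf.py is
    # copied with the PERMISSIONS listed in the install() rule above.
    cmake -B build -DCMAKE_BUILD_TYPE=Release
    cmake --build build -j
    cmake --install build --prefix /tmp/llama.cpp-install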
ci/run.sh

@@ -688,7 +688,7 @@ function gg_run_embd_bge_small {
     (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
     (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
 
-    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
+    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
 
     model_f16="${path_models}/ggml-model-f16.gguf"
     model_q8_0="${path_models}/ggml-model-q8_0.gguf"
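Aside on the logging idiom in this hunk: time writes its report to stderr, so the subshell plus 2>&1 is what lets tee capture the timing alongside the build output. A standalone sketch of the same pattern (the log path is an example):

    # Capture both command output and the timing report (stderr) into a
    # log while still printing them to the terminal.
    (time make -j) 2>&1 | tee -a /tmp/build.log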
examples/llava/requirements.txt

@@ -1,3 +1,3 @@
 -r ../../requirements/requirements-convert-legacy-llama.txt
 pillow~=10.2.0
-torch~=2.1.1
+torch~=2.2.1
poetry.lock (generated, new file): 1197 lines

File diff suppressed because it is too large.
pyproject.toml (new file): 44 lines

@@ -0,0 +1,44 @@
+[tool.poetry]
+name = "llama-cpp-scripts"
+version = "0.0.0"
+description = "Scripts that ship with llama.cpp"
+authors = ["GGML <ggml@ggml.ai>"]
+readme = "README.md"
+homepage = "https://ggml.ai"
+repository = "https://github.com/ggerganov/llama.cpp"
+keywords = ["ggml", "gguf", "llama.cpp"]
+packages = [{ include = "*.py", from = "." }]
+classifiers = [
+    "Programming Language :: Python :: 3",
+    "License :: OSI Approved :: MIT License",
+    "Operating System :: OS Independent",
+]
+
+[tool.poetry.dependencies]
+python = ">=3.9"
+numpy = "^1.25.0"
+sentencepiece = ">=0.1.98,<0.2.0"
+transformers = ">=4.35.2,<5.0.0"
+protobuf = ">=4.21.0,<5.0.0"
+gguf = { path = "./gguf-py" }
+torch = { version = "^2.2.0", source = "pytorch" }
+
+[tool.poetry.dev-dependencies]
+pytest = "^5.2"
+
+
+# Force wheel + cpu
+# For discussion and context see https://github.com/python-poetry/poetry#6409
+[[tool.poetry.source]]
+name = "pytorch"
+url = "https://download.pytorch.org/whl/cpu"
+priority = "explicit"
+
+[build-system]
+requires = ["poetry-core>=1.0.0"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.poetry.scripts]
+llama-convert-hf-to-gguf = "convert_hf_to_gguf:main"
+llama-convert-llama-ggml-to-gguf = "convert_llama_ggml_to_gguf:main"
+llama-ggml-vk-generate-shaders = "ggml_vk_generate_shaders:main"
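With this pyproject.toml in place, the convert scripts become installable console commands. A minimal sketch of exercising the new packaging (the --help flag is an assumption; check each script for its actual options):

    # Build and install via the poetry-core backend declared in [build-system]
    pip install .
    # The [tool.poetry.scripts] entry points then land on PATH:
    llama-convert-hf-to-gguf --help    # --help assumed; see the script itself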
requirements.txt

@@ -6,6 +6,6 @@
 
 -r ./requirements/requirements-convert-legacy-llama.txt
 
--r ./requirements/requirements-convert-hf-to-gguf.txt
--r ./requirements/requirements-convert-hf-to-gguf-update.txt
--r ./requirements/requirements-convert-llama-ggml-to-gguf.txt
+-r ./requirements/requirements-convert_hf_to_gguf.txt
+-r ./requirements/requirements-convert_hf_to_gguf_update.txt
+-r ./requirements/requirements-convert_llama_ggml_to_gguf.txt
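The -r lines are pip's include mechanism: installing the aggregate file recursively resolves each nested per-script file, so the renamed paths must match what is on disk. A quick way to confirm the renames are consistent:

    # Fails fast if any renamed -r reference points at a missing file
    pip install -r requirements.txt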
scripts/check-requirements.sh

@@ -167,11 +167,11 @@ if (( do_cleanup )); then
 fi
 
 check_convert_script examples/convert-legacy-llama.py
-for py in convert-*.py; do
+for py in convert_*.py; do
     # skip convert-hf-to-gguf-update.py
     # TODO: the check is failing for some reason:
     # https://github.com/ggerganov/llama.cpp/actions/runs/8875330981/job/24364557177?pr=6920
-    [[ $py == convert-hf-to-gguf-update.py ]] && continue
+    [[ $py == convert_hf_to_gguf_update.py ]] && continue
 
     check_convert_script "$py"
 done
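Since the convert scripts now use snake_case names, the loop's glob had to follow. A quick sketch (not part of the CI script) of what the updated glob should enumerate from the repo root, with the expected file list inferred from the renames in this change:

    # Expected to print convert_hf_to_gguf.py, convert_hf_to_gguf_update.py,
    # and convert_llama_ggml_to_gguf.py
    for py in convert_*.py; do
        echo "would check: $py"
    done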