Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 11:24:35 +00:00)

Commit ac0f33c920: Merge branch 'master' into compilade/fix-mpt-pretok
@@ -17,19 +17,18 @@
   rocmPackages,
   vulkan-headers,
   vulkan-loader,
-  clblast,
+  curl,
   useBlas ? builtins.all (x: !x) [
     useCuda
     useMetalKit
-    useOpenCL
     useRocm
     useVulkan
   ] && blas.meta.available,
   useCuda ? config.cudaSupport,
-  useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin && !useOpenCL,
+  useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin,
   useMpi ? false, # Increases the runtime closure size by ~700M
-  useOpenCL ? false,
   useRocm ? config.rocmSupport,
+  enableCurl ? true,
   useVulkan ? false,
   llamaVersion ? "0.0.0", # Arbitrary version, substituted by the flake

@@ -56,7 +55,6 @@ let
   ++ lib.optionals useCuda [ "CUDA" ]
   ++ lib.optionals useMetalKit [ "MetalKit" ]
   ++ lib.optionals useMpi [ "MPI" ]
-  ++ lib.optionals useOpenCL [ "OpenCL" ]
   ++ lib.optionals useRocm [ "ROCm" ]
   ++ lib.optionals useVulkan [ "Vulkan" ];

@@ -198,19 +196,19 @@ effectiveStdenv.mkDerivation (
     optionals effectiveStdenv.isDarwin darwinBuildInputs
     ++ optionals useCuda cudaBuildInputs
     ++ optionals useMpi [ mpi ]
-    ++ optionals useOpenCL [ clblast ]
     ++ optionals useRocm rocmBuildInputs
     ++ optionals useBlas [ blas ]
-    ++ optionals useVulkan vulkanBuildInputs;
+    ++ optionals useVulkan vulkanBuildInputs
+    ++ optionals enableCurl [ curl ];

   cmakeFlags =
     [
       (cmakeBool "LLAMA_BUILD_SERVER" true)
       (cmakeBool "BUILD_SHARED_LIBS" (!enableStatic))
       (cmakeBool "CMAKE_SKIP_BUILD_RPATH" true)
+      (cmakeBool "LLAMA_CURL" enableCurl)
       (cmakeBool "GGML_NATIVE" false)
       (cmakeBool "GGML_BLAS" useBlas)
-      (cmakeBool "GGML_CLBLAST" useOpenCL)
       (cmakeBool "GGML_CUDA" useCuda)
       (cmakeBool "GGML_HIPBLAS" useRocm)
       (cmakeBool "GGML_METAL" useMetalKit)
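Outside of Nix, the `LLAMA_CURL` toggle wired up in the `cmakeFlags` hunk above corresponds to a plain CMake option; a minimal sketch of exercising it directly (assuming libcurl development files are installed, which the Nix expression supplies via the new `curl` input):

```bash
# Configure llama.cpp with curl-based download support enabled (this is what the
# Nix option enableCurl maps onto), then build the release binaries.
cmake -B build -DLLAMA_CURL=ON
cmake --build build --config Release

# Pass -DLLAMA_CURL=OFF instead to reproduce enableCurl = false.
```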
@@ -254,7 +252,6 @@ effectiveStdenv.mkDerivation (
   useCuda
   useMetalKit
   useMpi
-  useOpenCL
   useRocm
   useVulkan
   ;

@@ -281,7 +278,7 @@ effectiveStdenv.mkDerivation (
   # Configurations we don't want even the CI to evaluate. Results in the
   # "unsupported platform" messages. This is mostly a no-op, because
   # cudaPackages would've refused to evaluate anyway.
-  badPlatforms = optionals (useCuda || useOpenCL) lib.platforms.darwin;
+  badPlatforms = optionals useCuda lib.platforms.darwin;

   # Configurations that are known to result in build failures. Can be
   # overridden by importing Nixpkgs with `allowBroken = true`.
.github/ISSUE_TEMPLATE/config.yml (vendored, 2 lines changed)

@@ -9,5 +9,3 @@ contact_links:
   - name: Want to contribute?
     url: https://github.com/ggerganov/llama.cpp/wiki/contribute
     about: Head to the contribution guide page of the wiki for areas you can help with
-
-
.gitignore (vendored, 12 lines changed)

@@ -47,6 +47,7 @@ build*
 !build-info.cpp.in
 !build-info.sh
 !build.zig
+!docs/build.md
 /libllama.so
 /llama-*
 android-ndk-*

@@ -98,13 +99,14 @@ examples/server/*.mjs.hpp

 # Python

-__pycache__
-.venv
-/Pipfile
-dist
-poetry.lock
+/.venv
+__pycache__/
+*/poetry.lock
 poetry.toml

+# Nix
+/result
+
 # Test binaries
 /tests/test-backend-ops
 /tests/test-double-float
@@ -42,6 +42,10 @@ endif()

 option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})

+if (WIN32)
+    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
+endif()
+
 #
 # option list
 #

@@ -152,7 +156,7 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
         DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama)

 install(
-    FILES convert-hf-to-gguf.py
+    FILES convert_hf_to_gguf.py
     PERMISSIONS
         OWNER_READ
         OWNER_WRITE
@@ -19,6 +19,7 @@
       "cacheVariables": {
         "CMAKE_EXPORT_COMPILE_COMMANDS": "ON",
         "CMAKE_CXX_COMPILER": "icx",
+        "CMAKE_C_COMPILER": "cl",
         "GGML_SYCL": "ON",
         "CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.."
       }
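For context, cache variables like the ones in this preset are picked up through the standard CMake preset workflow; a minimal sketch (the preset name below is a placeholder, not taken from this diff):

```bash
# Show the configure presets defined in CMakePresets.json.
cmake --list-presets

# Configure with the SYCL preset that carries these cache variables;
# "<sycl-preset>" is a placeholder for whatever name the presets file actually uses.
cmake --preset <sycl-preset>

# Build the binary directory reported by the configure step.
cmake --build <binary-dir> --config Release
```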
@@ -1,14 +1,24 @@
-# Contributing Guidelines
+# Pull requests

-## Checklist
+- Always squash-merge the PR before merging
+- Use the following format for your final commit: `<module> : <commit title> (#<issue_number>)`. For example: `utils : fix typo in utils.py (#1234)`
+- Test your changes:
+  - Using the commands in the [`tests`](tests) folder. For instance, running the `./tests/test-backend-ops` command tests different backend implementations of the GGML library
+  - Execute [the full CI locally on your machine](ci/README.md) before publishing
+- If the pull request contains only documentation changes (e.g., updating READMEs, adding new wiki pages), please add `[no ci]` to the commit title. This will skip unnecessary CI checks and help reduce build times
+- Please rate the complexity of your PR (i.e. `Review Complexity : Low`, `Review Complexity : Medium`, `Review Complexity : High`). This makes it easier for maintainers to triage the PRs.
+  - The PR template has a series of review complexity checkboxes `[ ]` that [you can mark as](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/about-task-lists) `[X]` for your conveience

-* Make sure your PR follows the [coding guidelines](https://github.com/ggerganov/llama.cpp/blob/master/README.md#coding-guidelines)
-* Test your changes using the commands in the [`tests`](tests) folder. For instance, running the `./tests/test-backend-ops` command tests different backend implementations of the GGML library
-* Execute [the full CI locally on your machine](ci/README.md) before publishing
+# Coding guidelines

-## PR formatting
+- Avoid adding third-party dependencies, extra files, extra headers, etc.
+- Always consider cross-compatibility with other operating systems and architectures
+- Avoid fancy looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
+- There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit
+- Clean-up any trailing whitespaces, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a`
+- Naming usually optimizes for common prefix (see https://github.com/ggerganov/ggml/pull/302#discussion_r1243240963)
+- Tensors store data in row-major order. We refer to dimension 0 as columns, 1 as rows, 2 as matrices
+- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$

-* Please rate the complexity of your PR (i.e. `Review Complexity : Low`, `Review Complexity : Medium`, `Review Complexity : High`). This makes it easier for maintainers to triage the PRs.
-  - The PR template has a series of review complexity checkboxes `[ ]` that you can mark as `[X]` for your conveience. Refer to [About task lists](https://docs.github.com/en/get-started/writing-on-github/working-with-advanced-formatting/about-task-lists) for more information.
-* If the pull request only contains documentation changes (e.g., updating READMEs, adding new wiki pages), please add `[no ci]` to the commit title. This will skip unnecessary CI checks and help reduce build times.
-* When squashing multiple commits on merge, use the following format for your commit title: `<module> : <commit title> (#<issue_number>)`. For example: `utils : Fix typo in utils.py (#1234)`
+![matmul](media/matmul.png)
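As a shape-level sanity check of the matrix-multiplication convention quoted above, here is a worked example (added for illustration, not part of the commit; it only uses the row-major dimension naming from the guideline):

```latex
% Take A and B with k columns each (dimension 0 holds the k columns):
%   A is an m-row by k-column matrix, B is an n-row by k-column matrix.
% Then C = ggml_mul_mat(ctx, A, B) satisfies
C^{T} = A\,B^{T} \in \mathbb{R}^{m \times n}
\quad\Longleftrightarrow\quad
C = B\,A^{T} \in \mathbb{R}^{n \times m},
% so the result C has m entries along dimension 0 (its columns count A's rows)
% and n entries along dimension 1 (its rows count B's rows).
```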
Makefile (24 lines changed)

@@ -14,6 +14,7 @@ BUILD_TARGETS = \
 	llama-finetune \
 	llama-gbnf-validator \
 	llama-gguf \
+	llama-gguf-hash \
 	llama-gguf-split \
 	llama-gritlm \
 	llama-imatrix \

@@ -62,6 +63,11 @@ TEST_TARGETS = \
 	tests/test-tokenizer-1-bpe \
 	tests/test-tokenizer-1-spm

+# Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned
+LEGACY_TARGETS = main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
+	simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama \
+	retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm
+
 # Deprecation aliases
 ifdef LLAMA_CUBLAS
 $(error LLAMA_CUBLAS is removed. Use GGML_CUDA instead.)

@@ -1086,6 +1092,7 @@ clean:
 	rm -vrf ggml/src/ggml-cuda/template-instances/*.o
 	rm -rvf $(BUILD_TARGETS)
 	rm -rvf $(TEST_TARGETS)
+	rm -rvf $(LEGACY_TARGETS)
 	find examples pocs -type f -name "*.o" -delete

 #

@@ -1172,6 +1179,23 @@ llama-gguf: examples/gguf/gguf.cpp \
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

+examples/gguf-hash/deps/sha1/sha1.o: \
+	examples/gguf-hash/deps/sha1/sha1.c
+	$(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@
+
+examples/gguf-hash/deps/xxhash/xxhash.o: \
+	examples/gguf-hash/deps/xxhash/xxhash.c
+	$(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@
+
+examples/gguf-hash/deps/sha256/sha256.o: \
+	examples/gguf-hash/deps/sha256/sha256.c
+	$(CC) $(CFLAGS) -Iexamples/gguf-hash/deps -c $< -o $@
+
+llama-gguf-hash: examples/gguf-hash/gguf-hash.cpp examples/gguf-hash/deps/sha1/sha1.o examples/gguf-hash/deps/xxhash/xxhash.o examples/gguf-hash/deps/sha256/sha256.o\
+	$(OBJ_ALL)
+	$(CXX) $(CXXFLAGS) -Iexamples/gguf-hash/deps -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
 llama-gguf-split: examples/gguf-split/gguf-split.cpp \
 	$(OBJ_ALL)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
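The Makefile hunks above add a `llama-gguf-hash` tool together with bundled SHA-1, SHA-256 and xxHash objects. A minimal usage sketch, assuming the tool takes a GGUF path and a flag per hash algorithm (the flag name is an assumption based on the compiled-in sha256 sources, see `examples/gguf-hash` for the authoritative options):

```bash
# Build only the new hashing tool (it is also part of the default BUILD_TARGETS).
make llama-gguf-hash

# Hash a converted model; --sha256 is assumed from the bundled sha256 dependency above,
# and the model path is a placeholder reused from the README examples in this diff.
./llama-gguf-hash --sha256 ./models/mymodel/ggml-model-f16.gguf
```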
769
README.md
769
README.md
@ -13,7 +13,7 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
> [!IMPORTANT]
|
> [!IMPORTANT]
|
||||||
[2024 Jun 12] Binaries have been renamed w/ a `llama-` prefix. `main` is now `llama-cli`, `server` is `llama-server`, etc (https://github.com/ggerganov/llama.cpp/pull/7809)
|
[2024 Jun 12] Binaries have been renamed w/ a `llama-` prefix. `main` is now `llama-cli`, `server` is `llama-server`, etc (https://github.com/ggerganov/llama.cpp/pull/7809)
|
||||||
|
|
||||||
### Recent API changes
|
## Recent API changes
|
||||||
|
|
||||||
- [2024 Jun 26] The source code and CMake build scripts have been restructured https://github.com/ggerganov/llama.cpp/pull/8006
|
- [2024 Jun 26] The source code and CMake build scripts have been restructured https://github.com/ggerganov/llama.cpp/pull/8006
|
||||||
- [2024 Apr 21] `llama_token_to_piece` can now optionally render special tokens https://github.com/ggerganov/llama.cpp/pull/6807
|
- [2024 Apr 21] `llama_token_to_piece` can now optionally render special tokens https://github.com/ggerganov/llama.cpp/pull/6807
|
||||||
@ -24,9 +24,9 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
|
|||||||
- [2024 Mar 4] Embeddings API updated https://github.com/ggerganov/llama.cpp/pull/5796
|
- [2024 Mar 4] Embeddings API updated https://github.com/ggerganov/llama.cpp/pull/5796
|
||||||
- [2024 Mar 3] `struct llama_context_params` https://github.com/ggerganov/llama.cpp/pull/5849
|
- [2024 Mar 3] `struct llama_context_params` https://github.com/ggerganov/llama.cpp/pull/5849
|
||||||
|
|
||||||
### Hot topics
|
## Hot topics
|
||||||
|
|
||||||
- **`convert.py` has been deprecated and moved to `examples/convert-legacy-llama.py`, please use `convert-hf-to-gguf.py`** https://github.com/ggerganov/llama.cpp/pull/7430
|
- **`convert.py` has been deprecated and moved to `examples/convert_legacy_llama.py`, please use `convert_hf_to_gguf.py`** https://github.com/ggerganov/llama.cpp/pull/7430
|
||||||
- Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021
|
- Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021
|
||||||
- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
|
- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
|
||||||
- MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
|
- MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
|
||||||
@@ -39,37 +39,6 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)

 ----

-<details>
-  <summary>Table of Contents</summary>
-  <ol>
-    <li>
-      <a href="#description">Description</a>
-    </li>
-    <li>
-      <a href="#usage">Usage</a>
-      <ul>
-        <li><a href="#get-the-code">Get the Code</a></li>
-        <li><a href="#build">Build</a></li>
-        <li><a href="#blas-build">BLAS Build</a></li>
-        <li><a href="#prepare-and-quantize">Prepare and Quantize</a></li>
-        <li><a href="#run-the-quantized-model">Run the quantized model</a></li>
-        <li><a href="#memorydisk-requirements">Memory/Disk Requirements</a></li>
-        <li><a href="#quantization">Quantization</a></li>
-        <li><a href="#interactive-mode">Interactive mode</a></li>
-        <li><a href="#constrained-output-with-grammars">Constrained output with grammars</a></li>
-        <li><a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a></li>
-        <li><a href="#seminal-papers-and-background-on-the-models">Seminal papers and background on the models</a></li>
-        <li><a href="#perplexity-measuring-model-quality">Perplexity (measuring model quality)</a></li>
-        <li><a href="#android">Android</a></li>
-        <li><a href="#docker">Docker</a></li>
-      </ul>
-    </li>
-    <li><a href="#contributing">Contributing</a></li>
-    <li><a href="#coding-guidelines">Coding guidelines</a></li>
-    <li><a href="#docs">Docs</a></li>
-  </ol>
-</details>
-
 ## Description

 The main goal of `llama.cpp` is to enable LLM inference with minimal setup and state-of-the-art performance on a wide

@@ -87,14 +56,6 @@ Since its [inception](https://github.com/ggerganov/llama.cpp/issues/33#issuecomm
 improved significantly thanks to many contributions. It is the main playground for developing new features for the
 [ggml](https://github.com/ggerganov/ggml) library.

-**Supported platforms:**
-
-- [X] Mac OS
-- [X] Linux
-- [X] Windows (via CMake)
-- [X] Docker
-- [X] FreeBSD
-
 **Supported models:**

 Typically finetunes of the base models below are supported as well.

@@ -108,6 +69,7 @@ Typically finetunes of the base models below are supported as well.
 - [X] [Falcon](https://huggingface.co/models?search=tiiuae/falcon)
 - [X] [Chinese LLaMA / Alpaca](https://github.com/ymcui/Chinese-LLaMA-Alpaca) and [Chinese LLaMA-2 / Alpaca-2](https://github.com/ymcui/Chinese-LLaMA-Alpaca-2)
 - [X] [Vigogne (French)](https://github.com/bofenghuang/vigogne)
+- [X] [BERT](https://github.com/ggerganov/llama.cpp/pull/5423)
 - [X] [Koala](https://bair.berkeley.edu/blog/2023/04/03/koala/)
 - [X] [Baichuan 1 & 2](https://huggingface.co/models?search=baichuan-inc/Baichuan) + [derivations](https://huggingface.co/hiyouga/baichuan-7b-sft)
 - [X] [Aquila 1 & 2](https://huggingface.co/models?search=BAAI/Aquila)

@@ -149,12 +111,6 @@ Typically finetunes of the base models below are supported as well.
 - [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)
 - [x] [Bunny](https://github.com/BAAI-DCAI/Bunny)

-**HTTP server**
-
-[llama.cpp web server](./examples/server) is a lightweight [OpenAI API](https://github.com/openai/openai-openapi) compatible HTTP server that can be used to serve local models and easily connect them to existing clients.
-
-[simplechat](./examples/server/public_simplechat) is a simple chat client, which can be used to chat with the model exposed using above web server (use --path to point to simplechat), from a local web browser.
-
 **Bindings:**

 - Python: [abetlen/llama-cpp-python](https://github.com/abetlen/llama-cpp-python)

@@ -175,6 +131,7 @@ Typically finetunes of the base models below are supported as well.
 - Zig: [deins/llama.cpp.zig](https://github.com/Deins/llama.cpp.zig)
 - Flutter/Dart: [netdur/llama_cpp_dart](https://github.com/netdur/llama_cpp_dart)
 - PHP (API bindings and features built on top of llama.cpp): [distantmagic/resonance](https://github.com/distantmagic/resonance) [(more info)](https://github.com/ggerganov/llama.cpp/pull/6326)
+- Guile Scheme: [guile_llama_cpp](https://savannah.nongnu.org/projects/guile-llama-cpp)

 **UI:**


@@ -217,10 +174,16 @@ Unless otherwise noted these projects are open-source with permissive licensing:
 **Tools:**

 - [akx/ggify](https://github.com/akx/ggify) – download PyTorch models from HuggingFace Hub and convert them to GGML
+- [crashr/gppm](https://github.com/crashr/gppm) – launch llama.cpp instances utilizing NVIDIA Tesla P40 or P100 GPUs with reduced idle power consumption

----
+**Infrastructure:**

-Here is a typical run using LLaMA v2 13B on M2 Ultra:
+- [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp
+
+## Demo
+
+<details>
+<summary>Typical run using LLaMA v2 13B on M2 Ultra</summary>

 ```
 $ make -j && ./llama-cli -m models/llama-13b-v2/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e
@@ -300,454 +263,85 @@ llama_print_timings:        eval time = 24513.59 ms / 399 runs ( 61.44 ms
 llama_print_timings:       total time = 25431.49 ms
 ```

+</details>
+
+<details>
+<summary>Demo of running both LLaMA-7B and whisper.cpp on a single M1 Pro MacBook</summary>
+
 And here is another demo of running both LLaMA-7B and [whisper.cpp](https://github.com/ggerganov/whisper.cpp) on a single M1 Pro MacBook:

 https://user-images.githubusercontent.com/1991296/224442907-7693d4be-acaa-4e01-8b4f-add84093ffff.mp4

+</details>
+
 ## Usage

 Here are the end-to-end binary build and model conversion steps for most supported models.

-### Get the Code
+### Basic usage

+Firstly, you need to get the binary. There are different methods that you can follow:
+- Method 1: Clone this repository and build locally, see [how to build](./docs/build.md)
+- Method 2: If you are using MacOS or Linux, you can install llama.cpp via [brew, flox or nix](./docs/install.md)
+- Method 3: Use a Docker image, see [documentation for Docker](./docs/docker.md)
+- Method 4: Download pre-built binary from [releases](https://github.com/ggerganov/llama.cpp/releases)
+
+You can run a basic completion using this command:
+
 ```bash
-git clone https://github.com/ggerganov/llama.cpp
-cd llama.cpp
+llama-cli -m your_model.gguf -p "I believe the meaning of life is" -n 128
+
+# Output:
+# I believe the meaning of life is to find your own truth and to live in accordance with it. For me, this means being true to myself and following my passions, even if they don't align with societal expectations. I think that's what I love about yoga – it's not just a physical practice, but a spiritual one too. It's about connecting with yourself, listening to your inner voice, and honoring your own unique journey.
 ```

-### Build
+See [this page](./examples/main/README.md) for a full list of parameters.

-In order to build llama.cpp you have four different options.
+### Conversation mode

-- Using `make`:
+If you want a more ChatGPT-like experience, you can run in conversation mode by passing `-cnv` as a parameter:
-  - On Linux or MacOS:
-
-      ```bash
-      make
-      ```
-
-  - On Windows:
-
-    1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
-    2. Extract `w64devkit` on your pc.
-    3. Run `w64devkit.exe`.
-    4. Use the `cd` command to reach the `llama.cpp` folder.
-    5. From here you can run:
-        ```bash
-        make
-        ```
-
-  - Notes:
-    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
-    - For faster repeated compilation, install [ccache](https://ccache.dev/).
-    - For debug builds, run `make LLAMA_DEBUG=1`
-
-- Using `CMake`:
-
-  ```bash
-  cmake -B build
-  cmake --build build --config Release
-  ```
-
-  **Notes**:
-
-  - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
-  - For faster repeated compilation, install [ccache](https://ccache.dev/).
-  - For debug builds, there are two cases:
-
-    1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):
-
-      ```bash
-      cmake -B build -DCMAKE_BUILD_TYPE=Debug
-      cmake --build build
-      ```
-
-    2. Multi-config generators (`-G` param set to Visual Studio, XCode...):
-
-      ```bash
-      cmake -B build -G "Xcode"
-      cmake --build build --config Debug
-      ```
-
-- Using `gmake` (FreeBSD):
-
-  1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics)
-  2. Add your user to **video** group
-  3. Install compilation dependencies.
-
-      ```bash
-      sudo pkg install gmake automake autoconf pkgconf llvm15 openblas
-
-      gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
-      ```
-
-### Homebrew
-
-On Mac and Linux, the homebrew package manager can be used via
-```
-brew install llama.cpp
-```
-The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
-
-### Nix
-
-On Mac and Linux, the Nix package manager can be used via
-```
-nix profile install nixpkgs#llama-cpp
-```
-For flake enabled installs.
-
-Or
-```
-nix-env --file '<nixpkgs>' --install --attr llama-cpp
-```
-For non-flake enabled installs.
-
-This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
-
-#### Flox
-
-On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via
-```
-flox install llama-cpp
-```
-Flox follows the nixpkgs build of llama.cpp.
-
-### Metal Build
-
-On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
-To disable the Metal build at compile time use the `GGML_NO_METAL=1` flag or the `GGML_METAL=OFF` cmake option.
-
-When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line
-argument.
-
-### BLAS Build
-
-Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS. There are currently several different BLAS implementations available for build and use:
-
-- #### Accelerate Framework:
-
-  This is only available on Mac PCs and it's enabled by default. You can just build using the normal instructions.
-
-- #### OpenBLAS:
-
-  This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS installed on your machine.
-
-  - Using `make`:
-    - On Linux:
-        ```bash
-        make GGML_OPENBLAS=1
-        ```
-
-    - On Windows:
-
-      1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
-      2. Download the latest version of [OpenBLAS for Windows](https://github.com/xianyi/OpenBLAS/releases).
-      3. Extract `w64devkit` on your pc.
-      4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, inside `w64devkit\x86_64-w64-mingw32\lib`.
-      5. From the same OpenBLAS zip copy the content of the `include` folder inside `w64devkit\x86_64-w64-mingw32\include`.
-      6. Run `w64devkit.exe`.
-      7. Use the `cd` command to reach the `llama.cpp` folder.
-      8. From here you can run:
-
-          ```bash
-          make GGML_OPENBLAS=1
-          ```
-
-  - Using `CMake` on Linux:
-
-      ```bash
-      cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
-      cmake --build build --config Release
-      ```
-
-- #### BLIS
-
-  Check [BLIS.md](docs/BLIS.md) for more information.
-
-- #### SYCL
-  SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators.
-
-  llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU).
-
-  For detailed info, please refer to [llama.cpp for SYCL](README-sycl.md).
-
-- #### Intel oneMKL
-  Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./README-sycl.md).
-
-  - Using manual oneAPI installation:
-    By default, `GGML_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DGGML_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. Otherwise please install oneAPI and follow the below steps:
-    ```bash
-    source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation
-    cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON
-    cmake --build build --config Release
-    ```
-
-  - Using oneAPI docker image:
-    If you do not want to source the environment vars and install oneAPI manually, you can also build the code using intel docker container: [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above.
-
-  Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.
-
-- #### CUDA
-
-  This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
-
-  For Jetson user, if you have Jetson Orin, you can try this: [Offical Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an old model(nano/TX2), need some additional operations before compiling.
-
-  - Using `make`:
-    ```bash
-    make GGML_CUDA=1
-    ```
-  - Using `CMake`:
-
-    ```bash
-    cmake -B build -DGGML_CUDA=ON
-    cmake --build build --config Release
-    ```
-
-  The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:
-
-  | Option | Legal values | Default | Description |
-  |--------|--------------|---------|-------------|
-  | GGML_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
-  | GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-  | GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
-  | GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. |
-  | GGML_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models |
-  | GGML_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
-  | GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
-  | GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
-  | GGML_CUDA_FA_ALL_QUANTS | Boolean | false | Compile support for all KV cache quantization type (combinations) for the FlashAttention CUDA kernels. More fine-grained control over KV cache size but compilation takes much longer. |
-
-- #### hipBLAS
-
-  This provides BLAS acceleration on HIP-supported AMD GPUs.
-  Make sure to have ROCm installed.
-  You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html#rocm-install-quick).
-
-  - Using `make`:
-    ```bash
-    make GGML_HIPBLAS=1
-    ```
-  - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU):
-    ```bash
-    HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
-        cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
-        && cmake --build build --config Release -- -j 16
-    ```
-    On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`.
-    However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).
-
-    Note that if you get the following error:
-    ```
-    clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library
-    ```
-    Try searching for a directory under `HIP_PATH` that contains the file
-    `oclc_abi_version_400.bc`. Then, add the following to the start of the
-    command: `HIP_DEVICE_LIB_PATH=<directory-you-just-found>`, so something
-    like:
-    ```bash
-    HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \
-    HIP_DEVICE_LIB_PATH=<directory-you-just-found> \
-        cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
-        && cmake --build build -- -j 16
-    ```
-
-  - Using `make` (example for target gfx1030, build with 16 CPU threads):
-    ```bash
-    make -j16 GGML_HIPBLAS=1 GGML_HIP_UMA=1 AMDGPU_TARGETS=gfx1030
-    ```
-
-  - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU):
-    ```bash
-    set PATH=%HIP_PATH%\bin;%PATH%
-    cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DGGML_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release
-    cmake --build build
-    ```
-    Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100` that corresponds to Radeon RX 7900XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors)
-    Find your gpu version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`.
-
-  The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
-  If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.
-  The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because it uses the same code as the cuBLAS version above):
-
-  | Option | Legal values | Default | Description |
-  |--------|--------------|---------|-------------|
-  | GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the HIP dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
-  | GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the HIP mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
-  | GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per HIP thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
-
-- #### Vulkan
-
-  **With docker**:
-
-  You don't need to install Vulkan SDK. It will be installed inside the container.
-
-  ```sh
-  # Build the image
-  docker build -t llama-cpp-vulkan -f .devops/llama-cli-vulkan.Dockerfile .
-
-  # Then, use it:
-  docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33
-  ```
-
-  **Without docker**:
-
-  Firstly, you need to make sure you have installed [Vulkan SDK](https://vulkan.lunarg.com/doc/view/latest/linux/getting_started_ubuntu.html)
-
-  For example, on Ubuntu 22.04 (jammy), use the command below:
-
-  ```bash
-  wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add -
-  wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
-  apt update -y
-  apt-get install -y vulkan-sdk
-  # To verify the installation, use the command below:
-  vulkaninfo
-  ```
-
-  Alternatively your package manager might be able to provide the appropriate libraries.
-  For example for Ubuntu 22.04 you can install `libvulkan-dev` instead.
-  For Fedora 40, you can install `vulkan-devel`, `glslc` and `glslang` packages.
-
-  Then, build llama.cpp using the cmake command below:
-
-  ```bash
-  cmake -B build -DGGML_VULKAN=1
-  cmake --build build --config Release
-  # Test the output binary (with "-ngl 33" to offload all layers to GPU)
-  ./bin/llama-cli -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4
-
-  # You should see in the output, ggml_vulkan detected your GPU. For example:
-  # ggml_vulkan: Using Intel(R) Graphics (ADL GT2) | uma: 1 | fp16: 1 | warp size: 32
-  ```
-
-### Prepare and Quantize
-
-> [!NOTE]
-> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `llama.cpp` main every 6 hours.
-
-To obtain the official LLaMA 2 weights please see the <a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a> section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.
-
-Note: `convert.py` has been moved to `examples/convert-legacy-llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derivatives.
-It does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
-
 ```bash
-# obtain the official LLaMA model weights and place them in ./models
-ls ./models
-llama-2-7b tokenizer_checklist.chk tokenizer.model
-# [Optional] for models using BPE tokenizers
-ls ./models
-<folder containing weights and tokenizer json> vocab.json
-# [Optional] for PyTorch .bin models like Mistral-7B
-ls ./models
-<folder containing weights and tokenizer json>
-
-# install Python dependencies
-python3 -m pip install -r requirements.txt
-
-# convert the model to ggml FP16 format
-python3 convert-hf-to-gguf.py models/mymodel/
-
-# quantize the model to 4-bits (using Q4_K_M method)
-./llama-quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
-
-# update the gguf filetype to current version if older version is now unsupported
-./llama-quantize ./models/mymodel/ggml-model-Q4_K_M.gguf ./models/mymodel/ggml-model-Q4_K_M-v2.gguf COPY
+llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv
+
+# Output:
+# > hi, who are you?
+# Hi there! I'm your helpful assistant! I'm an AI-powered chatbot designed to assist and provide information to users like you. I'm here to help answer your questions, provide guidance, and offer support on a wide range of topics. I'm a friendly and knowledgeable AI, and I'm always happy to help with anything you need. What's on your mind, and how can I assist you today?
+#
+# > what is 1+1?
+# Easy peasy! The answer to 1+1 is... 2!
 ```

-### Run the quantized model
+By default, the chat template will be taken from the input model. If you want to use another chat template, pass `--chat-template NAME` as a parameter. See the list of [supported templates](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template)

 ```bash
-# start inference on a gguf model
-./llama-cli -m ./models/mymodel/ggml-model-Q4_K_M.gguf -n 128
+./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --chat-template chatml
 ```

-When running the larger models, make sure you have enough disk space to store all the intermediate files.
+You can also use your own template via in-prefix, in-suffix and reverse-prompt parameters:

-### Running on Windows with prebuilt binaries
-
-You will find prebuilt Windows binaries on the release page.
-
-Simply download and extract the latest zip package of choice: (e.g. `llama-b1380-bin-win-avx2-x64.zip`)
-
-From the unzipped folder, open a terminal/cmd window here and place a pre-converted `.gguf` model file. Test out the main example like so:
-
-```
-.\main -m llama-2-7b.Q4_0.gguf -n 128
+```bash
+./llama-cli -m your_model.gguf -p "You are a helpful assistant" -cnv --in-prefix 'User: ' --reverse-prompt 'User:'
 ```

-### Memory/Disk Requirements
+### Web server

-As the models are currently fully loaded into memory, you will need adequate disk space to save them and sufficient RAM to load them. At the moment, memory and disk requirements are the same.
+[llama.cpp web server](./examples/server/README.md) is a lightweight [OpenAI API](https://github.com/openai/openai-openapi) compatible HTTP server that can be used to serve local models and easily connect them to existing clients.

-| Model | Original size | Quantized size (Q4_0) |
-|------:|--------------:|----------------------:|
-| 7B | 13 GB | 3.9 GB |
-| 13B | 24 GB | 7.8 GB |
-| 30B | 60 GB | 19.5 GB |
-| 65B | 120 GB | 38.5 GB |
+Example usage:

-### Quantization
+```bash
+./llama-server -m your_model.gguf --port 8080

-Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
-
-*(outdated)*
-
-| Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 |
-|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|
-| 7B | perplexity | 5.9066 | 6.1565 | 6.0912 | 5.9862 | 5.9481 | 5.9070 |
-| 7B | file size | 13.0G | 3.5G | 3.9G | 4.3G | 4.7G | 6.7G |
-| 7B | ms/tok @ 4th | 127 | 55 | 54 | 76 | 83 | 72 |
-| 7B | ms/tok @ 8th | 122 | 43 | 45 | 52 | 56 | 67 |
-| 7B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 |
-| 13B | perplexity | 5.2543 | 5.3860 | 5.3608 | 5.2856 | 5.2706 | 5.2548 |
-| 13B | file size | 25.0G | 6.8G | 7.6G | 8.3G | 9.1G | 13G |
-| 13B | ms/tok @ 4th | - | 103 | 105 | 148 | 160 | 131 |
-| 13B | ms/tok @ 8th | - | 73 | 82 | 98 | 105 | 128 |
-| 13B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 |
-
-- [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684)
-- recent k-quants improvements and new i-quants
-  - [#2707](https://github.com/ggerganov/llama.cpp/pull/2707)
-  - [#2807](https://github.com/ggerganov/llama.cpp/pull/2807)
-  - [#4773 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4773)
-  - [#4856 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4856)
-  - [#4861 - importance matrix](https://github.com/ggerganov/llama.cpp/pull/4861)
-  - [#4872 - MoE models](https://github.com/ggerganov/llama.cpp/pull/4872)
-  - [#4897 - 2-bit quantization](https://github.com/ggerganov/llama.cpp/pull/4897)
-  - [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930)
-  - [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957)
-  - [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969)
-  - [#4996 - k-qunats tuning](https://github.com/ggerganov/llama.cpp/pull/4996)
-  - [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060)
-  - [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196)
-  - [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361)
-
-### Perplexity (measuring model quality)
-
-You can use the `perplexity` example to measure perplexity over a given prompt (lower perplexity is better).
-For more information, see [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity).
-
-The perplexity measurements in table above are done against the `wikitext2` test dataset (https://paperswithcode.com/dataset/wikitext-2), with context length of 512.
-The time per token is measured on a MacBook M1 Pro 32GB RAM using 4 and 8 threads.
-
-#### How to run
-
-1. Download/extract: https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
-2. Run `./llama-perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw`
-3. Output:
-```
-perplexity : calculating perplexity over 655 chunks
-24.43 seconds per pass - ETA 4.45 hours
-[1]4.5970,[2]5.1807,[3]6.0382,...
-```
-And after 4.45 hours, you will have the final perplexity.
+# Basic web UI can be accessed via browser: http://localhost:8080
+# Chat completion endpoint: http://localhost:8080/v1/chat/completions
+```

 ### Interactive mode

-If you want a more ChatGPT-like experience, you can run in interactive mode by passing `-i` as a parameter.
+> [!NOTE]
+> If you prefer basic usage, please consider using conversation mode instead of interactive mode

 In this mode, you can always interrupt generation by pressing Ctrl+C and entering one or more lines of text, which will be converted into tokens and appended to the current context. You can also specify a *reverse prompt* with the parameter `-r "reverse prompt string"`. This will result in user input being prompted whenever the exact tokens of the reverse prompt string are encountered in the generation. A typical use is to use a prompt that makes LLaMA emulate a chat between multiple users, say Alice and Bob, and pass `-r "Alice:"`.

 Here is an example of a few-shot interaction, invoked with the command
@ -798,18 +392,70 @@ The `grammars/` folder contains a handful of sample grammars. To write your own,

For authoring more complex JSON grammars, you can also check out https://grammar.intrinsiclabs.ai/, a browser app that lets you write TypeScript interfaces which it compiles to GBNF grammars that you can save for local use. Note that the app is built and maintained by members of the community; please file any issues or FRs on [its repo](http://github.com/intrinsiclabsai/gbnfgen) and not this one.

### Obtaining and using the Facebook LLaMA 2 model
## Build

- Refer to [Facebook's LLaMA download page](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) if you want to access the model data.
Please refer to [Build llama.cpp locally](./docs/build.md)
- Alternatively, if you want to save time and space, you can download already converted and quantized models from [TheBloke](https://huggingface.co/TheBloke), including:
  - [LLaMA 2 7B base](https://huggingface.co/TheBloke/Llama-2-7B-GGUF)
  - [LLaMA 2 13B base](https://huggingface.co/TheBloke/Llama-2-13B-GGUF)
  - [LLaMA 2 70B base](https://huggingface.co/TheBloke/Llama-2-70B-GGUF)
  - [LLaMA 2 7B chat](https://huggingface.co/TheBloke/Llama-2-7B-chat-GGUF)
  - [LLaMA 2 13B chat](https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF)
  - [LLaMA 2 70B chat](https://huggingface.co/TheBloke/Llama-2-70B-chat-GGUF)

### Seminal papers and background on the models
## Supported backends

| Backend | Target devices |
| --- | --- |
| [Metal](./docs/build.md#metal-build) | Apple Silicon |
| [BLAS](./docs/build.md#blas-build) | All |
| [BLIS](./docs/backend/BLIS.md) | All |
| [SYCL](./docs/backend/SYCL.md) | Intel and Nvidia GPU |
| [CUDA](./docs/build.md#cuda) | Nvidia GPU |
| [hipBLAS](./docs/build.md#hipblas) | AMD GPU |
| [Vulkan](./docs/build.md#vulkan) | GPU |

## Tools

### Prepare and Quantize

> [!NOTE]
> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `llama.cpp` main every 6 hours.

To obtain the official LLaMA 2 weights, please see the <a href="#obtaining-and-using-the-facebook-llama-2-model">Obtaining and using the Facebook LLaMA 2 model</a> section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.

Note: `convert.py` has been moved to `examples/convert_legacy_llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derivatives.
It does not support LLaMA 3; use `convert_hf_to_gguf.py` with LLaMA 3 downloaded from Hugging Face.

To learn more about quantizing models, [read this documentation](./examples/quantize/README.md); a short example follows below.

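This is a hedged sketch of the typical convert-then-quantize flow; the directory, file names, and quantization preset are placeholders, not values from this README:

```bash
# convert a Hugging Face model directory to a 16-bit GGUF file
python3 convert_hf_to_gguf.py ./models/mymodel --outtype f16 --outfile ./models/mymodel-f16.gguf

# quantize the result to 4 bits (Q4_K_M is a common choice)
./llama-quantize ./models/mymodel-f16.gguf ./models/mymodel-Q4_K_M.gguf Q4_K_M
```
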
### Perplexity (measuring model quality)

You can use the `perplexity` example to measure perplexity over a given prompt (lower perplexity is better).
For more information, see [https://huggingface.co/docs/transformers/perplexity](https://huggingface.co/docs/transformers/perplexity).

To learn more about how to measure perplexity using llama.cpp, [read this documentation](./examples/perplexity/README.md)

## Contributing

- Contributors can open PRs
- Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch
- Collaborators will be invited based on contributions
- Any help with managing issues and PRs is very appreciated!
- See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions
- Read the [CONTRIBUTING.md](CONTRIBUTING.md) for more information
- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/llama.cpp/discussions/205)
- A bit of backstory for those who are interested: [Changelog podcast](https://changelog.com/podcast/532)

## Other documentation

- [main (cli)](./examples/main/README.md)
- [server](./examples/server/README.md)
- [jeopardy](./examples/jeopardy/README.md)
- [GBNF grammars](./grammars/README.md)

**Development documentation**

- [How to build](./docs/build.md)
- [Running on Docker](./docs/docker.md)
- [Build on Android](./docs/android.md)
- [Performance troubleshooting](./docs/token_generation_performance_tips.md)
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)

**Seminal papers and background on the models**

If your issue is with model generation quality, then please at least scan the following links and papers to understand the limitations of LLaMA models. This is especially important when choosing an appropriate model size and appreciating both the significant and subtle differences between LLaMA models and ChatGPT:

- LLaMA:
@ -820,178 +466,3 @@ If your issue is with model generation quality, then please at least scan the fo
- GPT-3.5 / InstructGPT / ChatGPT:
    - [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
    - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)

### Android

#### Build on Android using Termux

[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required).
```
apt update && apt upgrade -y
apt install git make cmake
```

It's recommended to move your model inside the `~/` directory for best performance:
```
cd storage/downloads
mv model.gguf ~/
```

[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.

#### Building the Project using Android NDK

Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.

Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:

```
$ mkdir build-android
$ cd build-android
$ export NDK=<your_ndk_directory>
$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
$ make
```

Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice).

Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

(Assuming you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
```
$ cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/
$ cd /data/data/com.termux/files/home/bin
$ chmod +x ./*
```

Download model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), and push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/`

```
$ mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/
```

Now, you can start chatting:

```
$ cd /data/data/com.termux/files/home/bin
$ ./llama-cli -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
```

Here's a demo of an interactive session running on a Pixel 5 phone:

https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4

### Docker

#### Prerequisites

* Docker must be installed and running on your system.
* Create a folder to store big models & intermediate files (ex. /llama/models)

#### Images

We have three Docker images available for this project:

1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)

Additionally, the following images are available, similar to the above:

- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)

The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library), you'll need to build the images locally for now.

#### Usage

The easiest way to download the models, convert them to ggml and optimize them is with the `--all-in-one` command which includes the full docker image.

Replace `/path/to/models` below with the actual path where you downloaded the models.

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
```

On completion, you are ready to play!

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
```

or with a light image:

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
```

or with a server image:

```bash
docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
```

### Docker With CUDA

Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) properly installed on Linux, or is using a GPU enabled cloud, `cuBLAS` should be accessible inside the container.

#### Building Locally

```bash
docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile .
docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile .
```

You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture.

The defaults are:

- `CUDA_VERSION` set to `11.7.1`
- `CUDA_DOCKER_ARCH` set to `all`

The resulting images are essentially the same as the non-CUDA images:

1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization.
2. `local/llama.cpp:light-cuda`: This image only includes the main executable file.
3. `local/llama.cpp:server-cuda`: This image only includes the server executable file.

#### Usage

After building locally, usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.

```bash
docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
```

### Contributing

- Contributors can open PRs
- Collaborators can push to branches in the `llama.cpp` repo and merge PRs into the `master` branch
- Collaborators will be invited based on contributions
- Any help with managing issues and PRs is very appreciated!
- Make sure to read this: [Inference at the edge](https://github.com/ggerganov/llama.cpp/discussions/205)
- A bit of backstory for those who are interested: [Changelog podcast](https://changelog.com/podcast/532)

### Coding guidelines

- Avoid adding third-party dependencies, extra files, extra headers, etc.
- Always consider cross-compatibility with other operating systems and architectures
- Avoid fancy-looking modern STL constructs, use basic `for` loops, avoid templates, keep it simple
- There are no strict rules for the code style, but try to follow the patterns in the code (indentation, spaces, etc.). Vertical alignment makes things more readable and easier to batch edit
- Clean up any trailing whitespace, use 4 spaces for indentation, brackets on the same line, `void * ptr`, `int & a`
- See [good first issues](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) for tasks suitable for first contributions
- Tensors store data in row-major order. We refer to dimension 0 as columns, 1 as rows, 2 as matrices
- Matrix multiplication is unconventional: [`C = ggml_mul_mat(ctx, A, B)`](https://github.com/ggerganov/llama.cpp/blob/880e352277fc017df4d5794f0c21c44e1eae2b84/ggml.h#L1058-L1064) means $C^T = A B^T \Leftrightarrow C = B A^T.$ (a quick dimension check follows the figure below)

![matmul](media/matmul.png)

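A quick dimension check of the identity above (an editorial addition, derived only from the stated formula): if $A \in \mathbb{R}^{m \times k}$ and $B \in \mathbb{R}^{n \times k}$ share their second dimension $k$, then

$$ C = B A^{T} \in \mathbb{R}^{n \times m} $$

so the result has one row per row of $B$ and one column per row of $A$.
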
### Docs

- [main (cli)](./examples/main/README.md)
- [server](./examples/server/README.md)
- [jeopardy](./examples/jeopardy/README.md)
- [BLIS](./docs/BLIS.md)
- [Performance troubleshooting](./docs/token_generation_performance_tips.md)
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
- [GBNF grammars](./grammars/README.md)

@ -287,7 +287,7 @@ function gg_run_open_llama_7b_v2 {
    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DGGML_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    python3 ../examples/convert-legacy-llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
    python3 ../examples/convert_legacy_llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@ -421,7 +421,7 @@ function gg_run_pythia_1_4b {
    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@ -553,7 +553,7 @@ function gg_run_pythia_2_8b {
    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DGGML_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@ -688,7 +688,7 @@ function gg_run_embd_bge_small {
    (time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
    (time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log

    python3 ../convert-hf-to-gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
    python3 ../convert_hf_to_gguf.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf

    model_f16="${path_models}/ggml-model-f16.gguf"
    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
@ -190,6 +190,12 @@ int32_t cpu_get_num_math() {
// CLI argument parsing
//

void gpt_params_handle_hf_token(gpt_params & params) {
    if (params.hf_token.empty() && std::getenv("HF_TOKEN")) {
        params.hf_token = std::getenv("HF_TOKEN");
    }
}
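
For orientation, a hedged usage sketch (not part of this patch; the repository, file, and token values are placeholders): the token can now be passed explicitly or picked up from the environment by the helper above.

```bash
# -hft/--hf-token sets the token explicitly; if omitted, HF_TOKEN from the
# environment is used (that is what gpt_params_handle_hf_token does)
HF_TOKEN=hf_xxxxxxxx ./llama-cli -hfr some-org/some-model-GGUF -hff some-model-Q4_K_M.gguf -p "Hello"
```
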

void gpt_params_handle_model_default(gpt_params & params) {
    if (!params.hf_repo.empty()) {
        // short-hand to avoid specifying --hf-file -> default it to --model
@ -237,6 +243,8 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {

    gpt_params_handle_model_default(params);

    gpt_params_handle_hf_token(params);

    if (params.escape) {
        string_process_escapes(params.prompt);
        string_process_escapes(params.input_prefix);
@ -472,6 +480,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
        else { invalid_param = true; }
        return true;
    }
    if (arg == "--attention") {
        CHECK_ARG
        std::string value(argv[i]);
        /**/ if (value == "causal") { params.attention_type = LLAMA_ATTENTION_TYPE_CAUSAL; }
        else if (value == "non-causal") { params.attention_type = LLAMA_ATTENTION_TYPE_NON_CAUSAL; }
        else { invalid_param = true; }
        return true;
    }
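
A hedged example of how this new flag is meant to be used from the CLI (the model path and prompt are placeholders); it mirrors the existing `--pooling` option and only applies to embedding-style runs:

```bash
# request non-causal attention and mean pooling when computing embeddings
./llama-embedding -m models/embedding-model-f16.gguf --pooling mean --attention non-causal -p "Hello world"
```
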
if (arg == "--defrag-thold" || arg == "-dt") {
|
if (arg == "--defrag-thold" || arg == "-dt") {
|
||||||
CHECK_ARG
|
CHECK_ARG
|
||||||
params.defrag_thold = std::stof(argv[i]);
|
params.defrag_thold = std::stof(argv[i]);
|
||||||
@ -644,6 +660,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
        params.model_url = argv[i];
        return true;
    }
    if (arg == "-hft" || arg == "--hf-token") {
        if (++i >= argc) {
            invalid_param = true;
            return true;
        }
        params.hf_token = argv[i];
        return true;
    }
    if (arg == "-hfr" || arg == "--hf-repo") {
        CHECK_ARG
        params.hf_repo = argv[i];
@ -757,7 +781,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
        params.cache_type_v = argv[++i];
        return true;
    }
    if (arg == "--multiline-input") {
    if (arg == "-mli" || arg == "--multiline-input") {
        params.multiline_input = true;
        return true;
    }
@ -1014,16 +1038,19 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
    }
    if (arg == "--in-prefix-bos") {
        params.input_prefix_bos = true;
        params.enable_chat_template = false;
        return true;
    }
    if (arg == "--in-prefix") {
        CHECK_ARG
        params.input_prefix = argv[i];
        params.enable_chat_template = false;
        return true;
    }
    if (arg == "--in-suffix") {
        CHECK_ARG
        params.input_suffix = argv[i];
        params.enable_chat_template = false;
        return true;
    }
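
The practical effect of the `enable_chat_template` flag set above: supplying a custom prefix or suffix opts the run out of the model's chat template. A hedged illustration (the model path and strings are placeholders):

```bash
# with --in-prefix/--in-suffix the default chat template is disabled and the
# given strings are used verbatim around interactive input
./llama-cli -m models/7B/ggml-model-q4_0.gguf -i --in-prefix "User: " --in-suffix "Assistant:"
```
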
if (arg == "--spm-infill") {
|
if (arg == "--spm-infill") {
|
||||||
@ -1391,7 +1418,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
    options.push_back({ "*", " --keep N", "number of tokens to keep from the initial prompt (default: %d, -1 = all)", params.n_keep });
    options.push_back({ "*", " --chunks N", "max number of chunks to process (default: %d, -1 = all)", params.n_chunks });
    options.push_back({ "*", "-fa, --flash-attn", "enable Flash Attention (default: %s)", params.flash_attn ? "enabled" : "disabled" });
    options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with (default: '%s')", params.prompt.c_str() });
    options.push_back({ "*", "-p, --prompt PROMPT", "prompt to start generation with\n"
                                                    "in conversation mode, this will be used as system prompt\n"
                                                    "(default: '%s')", params.prompt.c_str() });
    options.push_back({ "*", "-f, --file FNAME", "a file containing the prompt (default: none)" });
    options.push_back({ "*", " --in-file FNAME", "an input file (repeat to specify multiple files)" });
    options.push_back({ "*", "-bf, --binary-file FNAME", "binary file containing the prompt (default: none)" });
@ -1406,7 +1435,9 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
        "halt generation at PROMPT, return control in interactive mode\n"
        "can be specified more than once for multiple prompts" });
    options.push_back({ "main", "-sp, --special", "special tokens output enabled (default: %s)", params.special ? "true" : "false" });
    options.push_back({ "main", "-cnv, --conversation", "run in conversation mode (does not print special tokens and suffix/prefix) (default: %s)", params.conversation ? "true" : "false" });
    options.push_back({ "main", "-cnv, --conversation", "run in conversation mode, does not print special tokens and suffix/prefix\n"
                                                        "if suffix/prefix are not specified, default chat template will be used\n"
                                                        "(default: %s)", params.conversation ? "true" : "false" });
    options.push_back({ "main infill", "-i, --interactive", "run in interactive mode (default: %s)", params.interactive ? "true" : "false" });
    options.push_back({ "main infill", "-if, --interactive-first", "run in interactive mode and wait for input right away (default: %s)", params.interactive_first ? "true" : "false" });
    options.push_back({ "main infill", "-mli, --multiline-input", "allows you to write or paste multiple lines without ending each in '\\'" });
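
The updated help text above suggests the following kind of invocation for conversation mode (a sketch; the model path and system prompt are placeholders):

```bash
# in conversation mode the -p string is used as the system prompt and the
# model's default chat template formats the turns
./llama-cli -m models/7B/ggml-model-q4_0.gguf -cnv -p "You are a concise assistant."
```
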
@ -1450,6 +1481,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
    options.push_back({ "main", " --cfg-scale N", "strength of guidance (default: %.1f, 1.0 = disable)", (double)sparams.cfg_scale });
    options.push_back({ "main", " --chat-template JINJA_TEMPLATE",
        "set custom jinja chat template (default: template taken from model's metadata)\n"
        "if suffix/prefix are specified, template will be disabled\n"
        "only commonly used templates are accepted:\n"
        "https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template" });
    options.push_back({ "grammar" });
@ -1460,8 +1492,10 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
        "For schemas w/ external $refs, use --grammar + example/json_schema_to_grammar.py instead" });

    options.push_back({ "embedding" });
    options.push_back({ "embedding", " --pooling {none,mean,cls}",
    options.push_back({ "embedding", " --pooling {none,mean,cls,last}",
        "pooling type for embeddings, use model default if unspecified" });
    options.push_back({ "embedding", " --attention {causal,non-causal}",
        "attention type for embeddings, use model default if unspecified" });

    options.push_back({ "context hacking" });
    options.push_back({ "*", " --rope-scaling {none,linear,yarn}",
@ -1558,6 +1592,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
    options.push_back({ "*", "-mu, --model-url MODEL_URL", "model download url (default: unused)" });
    options.push_back({ "*", "-hfr, --hf-repo REPO", "Hugging Face model repository (default: unused)" });
    options.push_back({ "*", "-hff, --hf-file FILE", "Hugging Face model file (default: unused)" });
    options.push_back({ "*", "-hft, --hf-token TOKEN", "Hugging Face access token (default: value from HF_TOKEN environment variable)" });

    options.push_back({ "retrieval" });
    options.push_back({ "retrieval", " --context-file FNAME", "file to load context from (repeat to specify multiple files)" });
@ -1997,9 +2032,9 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
    llama_model * model = nullptr;

    if (!params.hf_repo.empty() && !params.hf_file.empty()) {
        model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), mparams);
        model = llama_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
    } else if (!params.model_url.empty()) {
        model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), mparams);
        model = llama_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
    } else {
        model = llama_load_model_from_file(params.model.c_str(), mparams);
    }
@ -2067,7 +2102,24 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
    if (params.warmup) {
        LOG("warming up the model with an empty run\n");

        std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
        std::vector<llama_token> tmp;
        llama_token bos = llama_token_bos(model);
        llama_token eos = llama_token_eos(model);
        // some models (e.g. T5) don't have a BOS token
        if (bos != -1) {
            tmp.push_back(bos);
        }
        tmp.push_back(eos);

        if (llama_model_has_encoder(model)) {
            llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
            llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
            if (decoder_start_token_id == -1) {
                decoder_start_token_id = bos;
            }
            tmp.clear();
            tmp.push_back(decoder_start_token_id);
        }
        llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
        llama_kv_cache_clear(lctx);
        llama_synchronize(lctx);
@ -2150,6 +2202,7 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
    cparams.yarn_beta_slow = params.yarn_beta_slow;
    cparams.yarn_orig_ctx = params.yarn_orig_ctx;
    cparams.pooling_type = params.pooling_type;
    cparams.attention_type = params.attention_type;
    cparams.defrag_thold = params.defrag_thold;
    cparams.cb_eval = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;
@ -2169,7 +2222,7 @@ static bool starts_with(const std::string & str, const std::string & prefix) {
    return str.rfind(prefix, 0) == 0;
}

static bool llama_download_file(const std::string & url, const std::string & path) {
static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {

    // Initialize libcurl
    std::unique_ptr<CURL, decltype(&curl_easy_cleanup)> curl(curl_easy_init(), &curl_easy_cleanup);
@ -2184,6 +2237,15 @@ static bool llama_download_file(const std::string & url, const std::string & pat
    curl_easy_setopt(curl.get(), CURLOPT_URL, url.c_str());
    curl_easy_setopt(curl.get(), CURLOPT_FOLLOWLOCATION, 1L);

    // Check if hf-token or bearer-token was specified
    if (!hf_token.empty()) {
        std::string auth_header = "Authorization: Bearer ";
        auth_header += hf_token.c_str();
        struct curl_slist *http_headers = NULL;
        http_headers = curl_slist_append(http_headers, auth_header.c_str());
        curl_easy_setopt(curl.get(), CURLOPT_HTTPHEADER, http_headers);
    }

#if defined(_WIN32)
    // CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
    // operating system. Currently implemented under MS-Windows.
@ -2379,6 +2441,7 @@ static bool llama_download_file(const std::string & url, const std::string & pat
struct llama_model * llama_load_model_from_url(
        const char * model_url,
        const char * path_model,
        const char * hf_token,
        const struct llama_model_params & params) {
    // Basic validation of the model_url
    if (!model_url || strlen(model_url) == 0) {
@ -2386,7 +2449,7 @@ struct llama_model * llama_load_model_from_url(
        return NULL;
    }

    if (!llama_download_file(model_url, path_model)) {
    if (!llama_download_file(model_url, path_model, hf_token)) {
        return NULL;
    }
@ -2434,14 +2497,14 @@ struct llama_model * llama_load_model_from_url(
    // Prepare download in parallel
    std::vector<std::future<bool>> futures_download;
    for (int idx = 1; idx < n_split; idx++) {
        futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split](int download_idx) -> bool {
        futures_download.push_back(std::async(std::launch::async, [&split_prefix, &split_url_prefix, &n_split, hf_token](int download_idx) -> bool {
            char split_path[PATH_MAX] = {0};
            llama_split_path(split_path, sizeof(split_path), split_prefix, download_idx, n_split);

            char split_url[LLAMA_CURL_MAX_URL_LENGTH] = {0};
            llama_split_path(split_url, sizeof(split_url), split_url_prefix, download_idx, n_split);

            return llama_download_file(split_url, split_path);
            return llama_download_file(split_url, split_path, hf_token);
        }, idx));
    }
@ -2460,6 +2523,7 @@ struct llama_model * llama_load_model_from_hf(
        const char * repo,
        const char * model,
        const char * path_model,
        const char * hf_token,
        const struct llama_model_params & params) {
    // construct hugging face model url:
    //
@ -2475,7 +2539,7 @@ struct llama_model * llama_load_model_from_hf(
    model_url += "/resolve/main/";
    model_url += model;

    return llama_load_model_from_url(model_url.c_str(), path_model, params);
    return llama_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
}

#else
@ -2483,6 +2547,7 @@ struct llama_model * llama_load_model_from_hf(
struct llama_model * llama_load_model_from_url(
        const char * /*model_url*/,
        const char * /*path_model*/,
        const char * /*hf_token*/,
        const struct llama_model_params & /*params*/) {
    fprintf(stderr, "%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__);
    return nullptr;
@ -2492,6 +2557,7 @@ struct llama_model * llama_load_model_from_hf(
        const char * /*repo*/,
        const char * /*model*/,
        const char * /*path_model*/,
        const char * /*hf_token*/,
        const struct llama_model_params & /*params*/) {
    fprintf(stderr, "%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
    return nullptr;
@ -2556,51 +2622,35 @@ std::vector<llama_token> llama_tokenize(
}

// before this commit:

std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
    std::vector<char> result(8, 0);
    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }

    return std::string(result.data(), result.size());
}

std::string llama_detokenize_spm(llama_context * ctx, const std::vector<llama_token> & tokens) {
    const llama_token bos_id = llama_token_bos(llama_get_model(ctx));

    std::string piece;
    std::string result;

    for (size_t i = 0; i < tokens.size(); ++i) {
        piece = llama_token_to_piece(ctx, tokens[i]);

        // remove the leading space of the first non-BOS token
        if (((tokens[0] == bos_id && i == 1) || (tokens[0] != bos_id && i == 0)) && piece[0] == ' ') {
            piece = piece.substr(1);
        }

        result += piece;
    }

    return result;
}

std::string llama_detokenize_bpe(llama_context * ctx, const std::vector<llama_token> & tokens) {
    std::string piece;
    std::string result;

    for (size_t i = 0; i < tokens.size(); ++i) {
        piece = llama_token_to_piece(ctx, tokens[i]);

        result += piece;
    }

    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
    return result;
}

// after this commit:

std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
    std::string piece;
    piece.resize(piece.capacity()); // using string internal cache, 15 bytes + '\n'
    const int n_chars = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
    if (n_chars < 0) {
        piece.resize(-n_chars);
        int check = llama_token_to_piece(llama_get_model(ctx), token, &piece[0], piece.size(), 0, special);
        GGML_ASSERT(check == -n_chars);
    }
    else {
        piece.resize(n_chars);
    }

    return piece;
}

std::string llama_detokenize(llama_context * ctx, const std::vector<llama_token> & tokens, bool special) {
    std::string text;
    text.resize(std::max(text.capacity(), tokens.size()));
    int32_t n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
    if (n_chars < 0) {
        text.resize(-n_chars);
        n_chars = llama_detokenize(llama_get_model(ctx), tokens.data(), (int32_t)tokens.size(), &text[0], (int32_t)text.size(), false, special);
        GGML_ASSERT(n_chars <= (int32_t)text.size()); // whitespace trimming is performed after per-token detokenization
    }

    text.resize(n_chars);

    // NOTE: the original tokenizer decodes bytes after collecting the pieces.
    return text;
}

bool llama_should_add_bos_token(const llama_model * model) {
@ -2668,12 +2718,19 @@ std::string llama_chat_format_single(const struct llama_model * model,
        const std::vector<llama_chat_msg> & past_msg,
        const llama_chat_msg & new_msg,
        bool add_ass) {
    std::ostringstream ss;
    auto fmt_past_msg = llama_chat_apply_template(model, tmpl, past_msg, false);
    std::vector<llama_chat_msg> chat_new(past_msg);
    // if the past_msg ends with a newline, we must preserve it in the formatted version
    if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
        ss << "\n";
    };
    // format chat with new_msg
    chat_new.push_back(new_msg);
    auto fmt_new_msg = llama_chat_apply_template(model, tmpl, chat_new, add_ass);
    auto formatted = fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return formatted;
    // get the diff part
    ss << fmt_new_msg.substr(fmt_past_msg.size(), fmt_new_msg.size() - fmt_past_msg.size());
    return ss.str();
}

std::string llama_chat_format_example(const struct llama_model * model,
@ -99,6 +99,7 @@ struct gpt_params {
    enum llama_split_mode split_mode = LLAMA_SPLIT_MODE_LAYER; // how to split the model across GPUs
    enum llama_rope_scaling_type rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED;
    enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_UNSPECIFIED; // pooling type for embeddings
    enum llama_attention_type attention_type = LLAMA_ATTENTION_TYPE_UNSPECIFIED; // attention type for embeddings

    // // sampling parameters
    struct llama_sampling_params sparams;
@ -107,6 +108,7 @@ struct gpt_params {
    std::string model_draft = ""; // draft model for speculative decoding
    std::string model_alias = "unknown"; // model alias
    std::string model_url = ""; // model url to download
    std::string hf_token = ""; // HF token
    std::string hf_repo = ""; // HF repo
    std::string hf_file = ""; // HF file
    std::string prompt = "";
@ -200,6 +202,7 @@ struct gpt_params {
    std::string public_path = "";
    std::string chat_template = "";
    std::string system_prompt = "";
    bool enable_chat_template = true;

    std::vector<std::string> api_keys;
@ -254,6 +257,7 @@ struct gpt_params {
    bool spm_infill = false; // suffix/prefix/middle pattern for infill
};

void gpt_params_handle_hf_token(gpt_params & params);
void gpt_params_handle_model_default(gpt_params & params);

bool gpt_params_parse_ex (int argc, char ** argv, gpt_params & params);
@ -309,8 +313,8 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
struct llama_model_params llama_model_params_from_gpt_params (const gpt_params & params);
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);

struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const struct llama_model_params & params);
struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const char * hf_token, const struct llama_model_params & params);
struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const struct llama_model_params & params);
struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const char * hf_token, const struct llama_model_params & params);

// Batch utils
@ -348,21 +352,13 @@ std::string llama_token_to_piece(
        llama_token token,
        bool special = true);

// before this commit:

// TODO: these should be moved in llama.h C-style API under single `llama_detokenize` function
// that takes into account the tokenizer type and decides how to handle the leading space
//
// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
// removes the leading space from the first non-BOS token
std::string llama_detokenize_spm(
        llama_context * ctx,
        const std::vector<llama_token> & tokens);

// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
std::string llama_detokenize_bpe(
        llama_context * ctx,
        const std::vector<llama_token> & tokens);

// after this commit:

// detokenizes a vector of tokens into a string
// should work similar to Python's `tokenizer.decode`
// optionally renders special/control tokens
std::string llama_detokenize(
        llama_context * ctx,
        const std::vector<llama_token> & tokens,
        bool special = true);

// Uses the value from the model metadata if possible, otherwise
// defaults to true when model type is SPM, otherwise false.
@ -458,4 +454,3 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
void yaml_dump_non_result_info(
    FILE * stream, const gpt_params & params, const llama_context * lctx,
    const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
@ -13,7 +13,7 @@ import sys
from enum import IntEnum
from pathlib import Path
from hashlib import sha256
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast
from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast

import math
import numpy as np
@ -404,7 +404,7 @@ class Model:

        return tokens, toktypes, tokpre

    # NOTE: this function is generated by convert-hf-to-gguf-update.py
    # NOTE: this function is generated by convert_hf_to_gguf_update.py
    # do not modify it manually!
    # ref: https://github.com/ggerganov/llama.cpp/pull/6920
    # Marker: Start get_vocab_base_pre
@ -424,7 +424,7 @@ class Model:

        res = None

        # NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
        # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
        # or pull the latest version of the model from Huggingface
        # don't edit the hashes manually!
        if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5":
@ -487,18 +487,24 @@ class Model:
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
res = "jina-v2-code"
if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b":
# ref: https://huggingface.co/THUDM/glm-4-9b-chat
res = "chatglm-bpe"
if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee":
# ref: https://huggingface.co/LumiOpen/Viking-7B
res = "viking"
if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
# ref: https://huggingface.co/core42/jais-13b
res = "jais"

if res is None:
logger.warning("\n")
logger.warning("**************************************************************************************")
logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
logger.warning("** There are 2 possible reasons for this:")
logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
logger.warning("** - the pre-tokenization config has changed upstream")
logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
logger.warning("**")
logger.warning(f"** chkhsh: {chkhsh}")
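As a side note for readers tracing the chkhsh branches above, here is a minimal sketch of how such a fingerprint can be reproduced. It assumes the hash is a SHA-256 over the stringified token ids of the fixed check string maintained in convert_hf_to_gguf_update.py; the model path and the shortened check string below are placeholders, not values from this commit.

```python
from hashlib import sha256
from transformers import AutoTokenizer

# hypothetical, shortened check string; the real chktxt exercises far more pre-tokenizer cases
chktxt = "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~"

tokenizer = AutoTokenizer.from_pretrained("path/to/model")   # placeholder path
chktok = tokenizer.encode(chktxt)                            # token ids produced by the pre-tokenizer + BPE
chkhsh = sha256(str(chktok).encode()).hexdigest()            # fingerprint compared against the table above
print(chkhsh)
```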
@ -576,7 +582,19 @@ class Model:
special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
special_vocab.add_to_gguf(self.gguf_writer)

def _set_vocab_sentencepiece(self):
def _set_vocab_sentencepiece(self, add_to_gguf=True):
tokens, scores, toktypes = self._create_vocab_sentencepiece()

self.gguf_writer.add_tokenizer_model("llama")
self.gguf_writer.add_tokenizer_pre("default")
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_scores(scores)
self.gguf_writer.add_token_types(toktypes)

special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)

def _create_vocab_sentencepiece(self):
from sentencepiece import SentencePieceProcessor

tokenizer_path = self.dir_model / 'tokenizer.model'
@ -638,14 +656,7 @@ class Model:
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.UNUSED)

self.gguf_writer.add_tokenizer_model("llama")
return tokens, scores, toktypes
self.gguf_writer.add_tokenizer_pre("default")
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_scores(scores)
self.gguf_writer.add_token_types(toktypes)

special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)

def _set_vocab_llama_hf(self):
vocab = gguf.LlamaHfVocab(self.dir_model)
@ -669,6 +680,51 @@ class Model:
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)

def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int):
tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf"
logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
vocab_reader = gguf.GGUFReader(tokenizer_path, "r")

default_pre = "mpt" if model_name == "gpt-neox" else "default"

field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL)
assert field  # tokenizer model
self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8"))

field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE)
self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre)

field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST)
assert field  # token list
self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

if model_name == "llama-spm":
field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES)
assert field  # token scores
self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
assert field  # token types
self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

if model_name != "llama-spm":
field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES)
assert field  # token merges
self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None:
self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0])
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None:
self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0])
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None:
self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0])
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None:
self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0])
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None:
self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0])
if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None:
self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0])


@Model.register("GPTNeoXForCausalLM")
class GPTNeoXModel(Model):
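The new _set_vocab_builtin() helper is intended as a fallback when a model repository ships no usable tokenizer files. Below is a minimal usage sketch following the same try/except pattern that OpenELMModel and MambaModel use further down in this diff; the class name and architecture are hypothetical and the snippet assumes the surrounding convert_hf_to_gguf.py module context.

```python
@Model.register("SomeLlamaLikeModel")  # hypothetical registration name
class SomeLlamaLikeModel(Model):
    model_arch = gguf.MODEL_ARCH.LLAMA  # placeholder architecture for the sketch

    def set_vocab(self):
        try:
            # preferred path: convert the model's own tokenizer.model
            self._set_vocab_sentencepiece()
        except FileNotFoundError:
            # fall back to the pre-baked vocab in models/ggml-vocab-llama-spm.gguf
            self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])
```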
@ -1934,7 +1990,7 @@ class Phi3MiniModel(Model):
if len(rope_scaling_type) == 0:
raise KeyError('Missing the required key rope_scaling.type')

if rope_scaling_type == 'su':
if rope_scaling_type == 'su' or rope_scaling_type == 'longrope':
attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0
elif rope_scaling_type == 'yarn':
attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0
@ -2308,6 +2364,8 @@ class GemmaModel(Model):
special_vocab._set_special_token("eot", 107)
special_vocab.add_to_gguf(self.gguf_writer)

self.gguf_writer.add_add_space_prefix(False)

def set_gguf_parameters(self):
hparams = self.hparams
block_count = hparams["num_hidden_layers"]
@ -2345,7 +2403,20 @@ class Gemma2Model(Model):
model_arch = gguf.MODEL_ARCH.GEMMA2

def set_vocab(self):
self._set_vocab_llama_hf()
tokens, scores, toktypes = self._create_vocab_sentencepiece()
# hack: This is required so that we can properly use start/end-of-turn for chat template
for i in range(108):
# including <unusedX>, <start_of_turn>, <end_of_turn>
toktypes[i] = SentencePieceTokenTypes.CONTROL
self.gguf_writer.add_tokenizer_model("llama")
self.gguf_writer.add_tokenizer_pre("default")
self.gguf_writer.add_token_list(tokens)
self.gguf_writer.add_token_scores(scores)
self.gguf_writer.add_token_types(toktypes)

special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
special_vocab.add_to_gguf(self.gguf_writer)

self.gguf_writer.add_add_space_prefix(False)

def set_gguf_parameters(self):
@ -2369,9 +2440,15 @@ class Gemma2Model(Model):
self.gguf_writer.add_final_logit_softcapping(
self.hparams["final_logit_softcapping"]
)
self.gguf_writer.add_sliding_window(self.hparams["sliding_window"])

# sanity check
attn_scalar = self.hparams["query_pre_attn_scalar"]
if attn_scalar != hparams["hidden_size"] / hparams["num_attention_heads"]:
raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")

def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
del bid # unusem
del bid # unused

# lm_head is not used in llama.cpp, while autoawq will include this tensor in model
# To prevent errors, skip loading lm_head.weight.
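To make the query_pre_attn_scalar sanity check above concrete, here is a small sketch with hypothetical Gemma-2-style hyperparameters; the numbers are illustrative and not taken from any particular config.json.

```python
# hypothetical hparams, for illustration only
hparams = {
    "hidden_size": 3584,
    "num_attention_heads": 16,
    "query_pre_attn_scalar": 224,
}

# 3584 / 16 == 224, so this configuration passes; any other scalar would trigger the ValueError above
assert hparams["query_pre_attn_scalar"] == hparams["hidden_size"] / hparams["num_attention_heads"]
```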
@ -2410,39 +2487,7 @@ class MambaModel(Model):
self._set_vocab_sentencepiece()
else:
# Use the GPT-NeoX tokenizer when no tokenizer files are present
tokenizer_path = Path(sys.path[0]) / "models" / "ggml-vocab-gpt-neox.gguf"
self._set_vocab_builtin("gpt-neox", vocab_size)
logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'")
neox_reader = gguf.GGUFReader(tokenizer_path, "r")

field = neox_reader.get_field(gguf.Keys.Tokenizer.MODEL)
self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8") if field else "gpt2")

field = neox_reader.get_field(gguf.Keys.Tokenizer.PRE)
self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else "mpt")

field = neox_reader.get_field(gguf.Keys.Tokenizer.LIST)
assert field
self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size])

field = neox_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE)
assert field
self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size])

field = neox_reader.get_field(gguf.Keys.Tokenizer.MERGES)
assert field
self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data])

field = neox_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)
self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0] if field else 1)

field = neox_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)
self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0] if field else 0)

field = neox_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)
self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0] if field else 0)

field = neox_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)
self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0] if field else 0)

def set_gguf_parameters(self):
d_model = self.find_hparam(["hidden_size", "d_model"])
@ -2594,6 +2639,82 @@ class JinaBertV2Model(BertModel):
self.gguf_writer.add_add_eos_token(True)


@Model.register("OpenELMForCausalLM")
class OpenELMModel(Model):
model_arch = gguf.MODEL_ARCH.OPENELM

@staticmethod
def _make_divisible(v: float | int, divisor: int) -> int:
# ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38
new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

ffn_multipliers: list[float] = self.hparams["ffn_multipliers"]
ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"]
self._n_embd: int = self.hparams["model_dim"]
self._num_kv_heads: list[int] = self.hparams["num_kv_heads"]
self._num_query_heads: list[int] = self.hparams["num_query_heads"]
self._ffn_dims: list[int] = [
OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor)
for multiplier in ffn_multipliers
]
assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int)
assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int)

# Uses the tokenizer from meta-llama/Llama-2-7b-hf
def set_vocab(self):
try:
self._set_vocab_sentencepiece()
except FileNotFoundError:
self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"])

def set_gguf_parameters(self):
n_embd = self._n_embd
head_dim = self.hparams["head_dim"]
rot_pct = 1.0
assert self.block_count == len(self._num_kv_heads)
assert self.block_count == len(self._num_query_heads)
assert self.block_count == len(self._ffn_dims)

self.gguf_writer.add_name(self.dir_model.name if self.model_name is None else self.model_name)
self.gguf_writer.add_block_count(self.block_count)
self.gguf_writer.add_context_length(self.hparams["max_context_length"])
self.gguf_writer.add_embedding_length(n_embd)
self.gguf_writer.add_feed_forward_length(self._ffn_dims)
self.gguf_writer.add_head_count(self._num_query_heads)
self.gguf_writer.add_head_count_kv(self._num_kv_heads)
self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"])
# https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30
self.gguf_writer.add_layer_norm_rms_eps(1e-6)
self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim))
self.gguf_writer.add_key_length(head_dim)
self.gguf_writer.add_value_length(head_dim)
self.gguf_writer.add_file_type(self.ftype)

def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any:
if "n_layers" in keys:
return self.hparams["num_transformer_layers"]

return super().find_hparam(keys, optional)

def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:

# split ff
if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight":
ff_dim = self._ffn_dims[bid]
yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])
yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])
return

yield (self.map_tensor_name(name), data_torch)


@Model.register("ArcticForCausalLM")
class ArcticModel(Model):
model_arch = gguf.MODEL_ARCH.ARCTIC
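A quick worked example of the _make_divisible() rounding used above to derive OpenELM's per-layer FFN widths; the widths and divisor below are hypothetical, chosen only to show the two rounding branches.

```python
def make_divisible(v: float, divisor: int) -> int:
    # same rounding rule as OpenELMModel._make_divisible above
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:  # never round down by more than 10%
        new_v += divisor
    return new_v

# hypothetical values, for illustration only
print(make_divisible(0.5 * 1280, 256))  # 640 -> 768 (rounds half up to a multiple of 256)
print(make_divisible(700, 512))         # 700 -> 512 would lose >10%, so it bumps up to 1024
```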
@ -2824,11 +2945,17 @@ class DeepseekV2Model(Model):
raise ValueError(f"Unprocessed experts: {experts}")


@Model.register("T5ForConditionalGeneration")
@Model.register("T5WithLMHeadModel")
@Model.register("T5ForConditionalGeneration")
@Model.register("MT5ForConditionalGeneration")
@Model.register("UMT5ForConditionalGeneration")
class T5Model(Model):
model_arch = gguf.MODEL_ARCH.T5

def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.shared_token_embeddings_found = False

def set_vocab(self):
# to avoid TypeError: Descriptors cannot be created directly
# exception when importing sentencepiece_model_pb2
@ -2836,17 +2963,29 @@ class T5Model(Model):
from sentencepiece import SentencePieceProcessor
from sentencepiece import sentencepiece_model_pb2 as model

tokenizer_path = self.dir_model / 'spiece.model'
tokenizer_path = self.dir_model / 'tokenizer.model'

# many older models use spiece.model tokenizer model filename
if not tokenizer_path.is_file():
tokenizer_path = self.dir_model / 'spiece.model'

if not tokenizer_path.is_file():
raise FileNotFoundError(f"File not found: {tokenizer_path}")

sentencepiece_model = model.ModelProto()
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())

# some models like Pile-T5 family use BPE tokenizer instead of Unigram
if sentencepiece_model.trainer_spec.model_type == 2: # BPE
# assure the tokenizer model file name is correct
assert tokenizer_path.name == 'tokenizer.model'
return self._set_vocab_sentencepiece()
else:
assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM

add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces
precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap
assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM

tokenizer = SentencePieceProcessor()
tokenizer.LoadFromFile(str(tokenizer_path))
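For reference, a standalone sketch of the trainer_spec.model_type probe used above to tell BPE sentencepiece models (Pile-T5 style) apart from Unigram ones; the tokenizer path is a placeholder.

```python
from sentencepiece import sentencepiece_model_pb2 as model

sp = model.ModelProto()
with open("path/to/tokenizer.model", "rb") as f:  # placeholder path
    sp.ParseFromString(f.read())

# trainer_spec.model_type: 1 = UNIGRAM, 2 = BPE (the two values checked above)
print(sp.trainer_spec.model_type)
```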
@ -2916,7 +3055,10 @@ class T5Model(Model):

def set_gguf_parameters(self):
self.gguf_writer.add_name("T5")
self.gguf_writer.add_context_length(self.hparams["n_positions"])
if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None:
logger.warning("Couldn't find context length in config.json, assuming default value of 512")
n_ctx = 512
self.gguf_writer.add_context_length(n_ctx)
self.gguf_writer.add_embedding_length(self.hparams["d_model"])
self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"])
self.gguf_writer.add_block_count(self.hparams["num_layers"])
@ -2932,16 +3074,295 @@ class T5Model(Model):
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
del bid # unused

# Sometimes T5 and Flan-T5 based models contain "encoder.embed_tokens.weight" tensor or
# T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight",
# "decoder.embed_tokens.weight" tensors that are duplicates of "shared.weight" tensor
# "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored
# To prevent errors caused by an unnecessary unmapped tensor, skip both of them and use only "shared.weight".
# in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder
if name == "decoder.embed_tokens.weight" or name == "encoder.embed_tokens.weight":
# and decoder and ignore the remaining ones.
logger.debug(f"Skipping tensor {name!r} in safetensors so that convert can end normally.")
if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]:
return []
if not self.shared_token_embeddings_found:
name = "shared.weight"
self.shared_token_embeddings_found = True
else:
logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.")
return []

return [(self.map_tensor_name(name), data_torch)]

@Model.register("JAISLMHeadModel")
|
||||||
|
class JaisModel(Model):
|
||||||
|
model_arch = gguf.MODEL_ARCH.JAIS
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
|
# SwigLU activation
|
||||||
|
assert self.hparams["activation_function"] == "swiglu"
|
||||||
|
# ALiBi position embedding
|
||||||
|
assert self.hparams["position_embedding_type"] == "alibi"
|
||||||
|
|
||||||
|
# Embeddings scale
|
||||||
|
self.embeddings_scale = 1.0
|
||||||
|
# note: For some JAIS flavors, output is tied to (same as) wte in original model
|
||||||
|
self.output_is_wte = False
|
||||||
|
if 'mup_embeddings_scale' in self.hparams:
|
||||||
|
self.output_is_wte = True # Hack (?)
|
||||||
|
self.embeddings_scale = self.hparams['mup_embeddings_scale']
|
||||||
|
elif 'embeddings_scale' in self.hparams:
|
||||||
|
self.embeddings_scale = self.hparams['embeddings_scale']
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
self.width_scale = 1.0
|
||||||
|
if 'mup_output_alpha' in self.hparams:
|
||||||
|
assert 'mup_width_scale' in self.hparams
|
||||||
|
self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale']
|
||||||
|
elif 'width_scale' in self.hparams:
|
||||||
|
self.width_scale = self.hparams['width_scale']
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
self.max_alibi_bias = 8.0
|
||||||
|
|
||||||
|
def set_vocab(self):
|
||||||
|
self._set_vocab_gpt2()
|
||||||
|
|
||||||
|
def set_gguf_parameters(self):
|
||||||
|
self.gguf_writer.add_name(self.dir_model.name)
|
||||||
|
self.gguf_writer.add_block_count(self.hparams["n_layer"])
|
||||||
|
self.gguf_writer.add_context_length(self.hparams["n_positions"])
|
||||||
|
self.gguf_writer.add_embedding_length(self.hparams["n_embd"])
|
||||||
|
self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"])
|
||||||
|
self.gguf_writer.add_head_count(self.hparams["n_head"])
|
||||||
|
self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"])
|
||||||
|
self.gguf_writer.add_file_type(self.ftype)
|
||||||
|
|
||||||
|
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||||
|
del bid # unused
|
||||||
|
|
||||||
|
tensors: list[tuple[str, Tensor]] = []
|
||||||
|
|
||||||
|
# we don't need these
|
||||||
|
if name.endswith((".attn.bias")):
|
||||||
|
return tensors
|
||||||
|
|
||||||
|
if name.endswith(("relative_pe.slopes")):
|
||||||
|
# Calculate max ALiBi bias (this is the inverse of the ALiBi calculation)
|
||||||
|
# Some other models has max_alibi_bias spelled out explicitly in the hyperparams,
|
||||||
|
# but Jais's PyTorch model simply precalculates the slope values and places them
|
||||||
|
# in relative_pes.slopes
|
||||||
|
n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
|
||||||
|
first_val = float(data_torch._data[0])
|
||||||
|
self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)
|
||||||
|
|
||||||
|
return tensors
|
||||||
|
|
||||||
|
if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")):
|
||||||
|
data_torch = data_torch.transpose(1, 0)
|
||||||
|
|
||||||
|
new_name = self.map_tensor_name(name)
|
||||||
|
|
||||||
|
if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD):
|
||||||
|
tensors.append((new_name, data_torch * self.embeddings_scale))
|
||||||
|
if self.output_is_wte:
|
||||||
|
tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch * self.width_scale))
|
||||||
|
elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT):
|
||||||
|
assert not self.output_is_wte
|
||||||
|
tensors.append((new_name, data_torch * self.width_scale))
|
||||||
|
else:
|
||||||
|
tensors.append((new_name, data_torch))
|
||||||
|
|
||||||
|
return tensors
|
||||||
|
|
||||||
|
def write_tensors(self):
|
||||||
|
super().write_tensors()
|
||||||
|
self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias)
|
||||||
|
|
||||||
|
|
||||||
|
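The slope-to-bias inversion in JaisModel.modify_tensors() above can be checked numerically. This is a hedged sketch, assuming the standard ALiBi slope formulation where the first slope is 2^(-8/n_head), and using a hypothetical head count.

```python
import math

n_head = 32                              # hypothetical head count (a power of two)
first_slope = 2 ** (-8.0 / n_head)       # first ALiBi slope under the standard formulation

n_head_closest_log2 = 2 ** math.floor(math.log2(n_head))
max_alibi_bias = -round(math.log2(first_slope) * n_head_closest_log2)
print(max_alibi_bias)                    # 8, matching the 8.0 default used above
```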
@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration")
|
||||||
|
class ChatGLMModel(Model):
|
||||||
|
model_arch = gguf.MODEL_ARCH.CHATGLM
|
||||||
|
|
||||||
|
def set_vocab_chatglm3(self):
|
||||||
|
dir_model = self.dir_model
|
||||||
|
hparams = self.hparams
|
||||||
|
tokens: list[bytearray] = []
|
||||||
|
toktypes: list[int] = []
|
||||||
|
scores: list[float] = []
|
||||||
|
|
||||||
|
from transformers import AutoTokenizer
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
|
||||||
|
vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab()))
|
||||||
|
assert max(tokenizer.get_vocab().values()) < vocab_size
|
||||||
|
role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"]
|
||||||
|
special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens
|
||||||
|
for token_id in range(vocab_size):
|
||||||
|
piece = tokenizer._convert_id_to_token(token_id)
|
||||||
|
if token_id == 0:
|
||||||
|
piece = "<unk>"
|
||||||
|
elif token_id == 1:
|
||||||
|
piece = "<bos>"
|
||||||
|
elif token_id == 2:
|
||||||
|
piece = "<eos>"
|
||||||
|
|
||||||
|
text = piece.encode("utf-8")
|
||||||
|
score = 0.0
|
||||||
|
# Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py),
|
||||||
|
# it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size()
|
||||||
|
if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size():
|
||||||
|
score = tokenizer.tokenizer.sp_model.get_score(token_id)
|
||||||
|
|
||||||
|
if len(piece) == 0:
|
||||||
|
text = f"[PAD{token_id}]".encode("utf-8")
|
||||||
|
|
||||||
|
if token_id >= tokenizer.tokenizer.sp_model.vocab_size():
|
||||||
|
if piece in special_tokens:
|
||||||
|
# show special tokens in prompt
|
||||||
|
toktype = SentencePieceTokenTypes.USER_DEFINED
|
||||||
|
else:
|
||||||
|
toktype = SentencePieceTokenTypes.UNKNOWN
|
||||||
|
tokens.append(text)
|
||||||
|
scores.append(score)
|
||||||
|
toktypes.append(toktype)
|
||||||
|
continue
|
||||||
|
|
||||||
|
toktype = SentencePieceTokenTypes.NORMAL
|
||||||
|
if tokenizer.tokenizer.sp_model.is_unknown(token_id):
|
||||||
|
toktype = SentencePieceTokenTypes.UNKNOWN
|
||||||
|
elif tokenizer.tokenizer.sp_model.is_control(token_id):
|
||||||
|
toktype = SentencePieceTokenTypes.CONTROL
|
||||||
|
elif tokenizer.tokenizer.sp_model.is_unused(token_id):
|
||||||
|
toktype = SentencePieceTokenTypes.UNUSED
|
||||||
|
elif tokenizer.tokenizer.sp_model.is_byte(token_id):
|
||||||
|
toktype = SentencePieceTokenTypes.BYTE
|
||||||
|
|
||||||
|
tokens.append(text)
|
||||||
|
scores.append(score)
|
||||||
|
toktypes.append(toktype)
|
||||||
|
|
||||||
|
self.gguf_writer.add_tokenizer_model("llama")
|
||||||
|
# glm3 needs prefix and suffix formatted as:
|
||||||
|
# prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>"
|
||||||
|
self.gguf_writer.add_tokenizer_pre("chatglm-spm")
|
||||||
|
self.gguf_writer.add_token_list(tokens)
|
||||||
|
self.gguf_writer.add_token_scores(scores)
|
||||||
|
self.gguf_writer.add_token_types(toktypes)
|
||||||
|
|
||||||
|
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
|
||||||
|
special_vocab.add_to_gguf(self.gguf_writer)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def token_bytes_to_string(b):
|
||||||
|
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
|
||||||
|
byte_encoder = bytes_to_unicode()
|
||||||
|
return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]:
|
||||||
|
parts = [bytes([b]) for b in token]
|
||||||
|
while True:
|
||||||
|
min_idx = None
|
||||||
|
min_rank = None
|
||||||
|
for i, pair in enumerate(zip(parts[:-1], parts[1:])):
|
||||||
|
rank = mergeable_ranks.get(pair[0] + pair[1])
|
||||||
|
if rank is not None and (min_rank is None or rank < min_rank):
|
||||||
|
min_idx = i
|
||||||
|
min_rank = rank
|
||||||
|
if min_rank is None or (max_rank is not None and min_rank >= max_rank):
|
||||||
|
break
|
||||||
|
assert min_idx is not None
|
||||||
|
parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
|
||||||
|
return parts
|
||||||
|
|
||||||
|
def set_vocab(self):
|
||||||
|
if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""):
|
||||||
|
self.set_vocab_chatglm3()
|
||||||
|
return
|
||||||
|
|
||||||
|
dir_model = self.dir_model
|
||||||
|
hparams = self.hparams
|
||||||
|
tokens: list[str] = []
|
||||||
|
toktypes: list[int] = []
|
||||||
|
|
||||||
|
from transformers import AutoTokenizer
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
|
||||||
|
vocab_size = hparams["padded_vocab_size"]
|
||||||
|
assert max(tokenizer.get_vocab().values()) < vocab_size
|
||||||
|
|
||||||
|
tokpre = self.get_vocab_base_pre(tokenizer)
|
||||||
|
|
||||||
|
merges = []
|
||||||
|
vocab = {}
|
||||||
|
mergeable_ranks = tokenizer.mergeable_ranks
|
||||||
|
for token, rank in mergeable_ranks.items():
|
||||||
|
vocab[ChatGLMModel.token_bytes_to_string(token)] = rank
|
||||||
|
if len(token) == 1:
|
||||||
|
continue
|
||||||
|
merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank)
|
||||||
|
assert len(merged) >= 2 and len(merged) <= 7
|
||||||
|
merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged)))
|
||||||
|
|
||||||
|
# for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
|
||||||
|
added_vocab = tokenizer.get_added_vocab()
|
||||||
|
reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
|
||||||
|
|
||||||
|
for i in range(vocab_size):
|
||||||
|
if i not in reverse_vocab:
|
||||||
|
tokens.append(f"[PAD{i}]")
|
||||||
|
toktypes.append(gguf.TokenType.USER_DEFINED)
|
||||||
|
elif reverse_vocab[i] in added_vocab:
|
||||||
|
tokens.append(reverse_vocab[i])
|
||||||
|
if tokenizer.added_tokens_decoder[i].special:
|
||||||
|
toktypes.append(gguf.TokenType.CONTROL)
|
||||||
|
else:
|
||||||
|
toktypes.append(gguf.TokenType.USER_DEFINED)
|
||||||
|
else:
|
||||||
|
tokens.append(reverse_vocab[i])
|
||||||
|
toktypes.append(gguf.TokenType.NORMAL)
|
||||||
|
|
||||||
|
self.gguf_writer.add_tokenizer_model("gpt2")
|
||||||
|
self.gguf_writer.add_tokenizer_pre(tokpre)
|
||||||
|
self.gguf_writer.add_token_list(tokens)
|
||||||
|
self.gguf_writer.add_token_types(toktypes)
|
||||||
|
|
||||||
|
special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
|
||||||
|
special_vocab.merges = merges
|
||||||
|
# only add special tokens when they were not already loaded from config.json
|
||||||
|
special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"])
|
||||||
|
special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"])
|
||||||
|
# this one is usually not in config.json anyway
|
||||||
|
special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"])
|
||||||
|
special_vocab.add_to_gguf(self.gguf_writer)
|
||||||
|
|
||||||
|
def set_gguf_parameters(self):
|
||||||
|
self.gguf_writer.add_name(self.hparams.get("_name_or_path").split("/")[1]) # THUDM/glm4-9b-chat or THUDM/chatglm3-6b
|
||||||
|
n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
|
||||||
|
n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
|
||||||
|
n_head_kv = self.hparams.get("multi_query_group_num", n_head)
|
||||||
|
self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed))
|
||||||
|
self.gguf_writer.add_embedding_length(n_embed)
|
||||||
|
self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed))
|
||||||
|
self.gguf_writer.add_block_count(self.hparams["num_layers"])
|
||||||
|
self.gguf_writer.add_head_count(n_head)
|
||||||
|
self.gguf_writer.add_head_count_kv(n_head_kv)
|
||||||
|
self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"])
|
||||||
|
self.gguf_writer.add_file_type(self.ftype)
|
||||||
|
self.gguf_writer.add_rope_dimension_count(64)
|
||||||
|
self.gguf_writer.add_add_bos_token(False)
|
||||||
|
rope_freq = 10000
|
||||||
|
if "rope_ratio" in self.hparams:
|
||||||
|
rope_freq = rope_freq * self.hparams["rope_ratio"]
|
||||||
|
self.gguf_writer.add_rope_freq_base(rope_freq)
|
||||||
|
|
||||||
|
def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
|
||||||
|
del bid # unused
|
||||||
|
|
||||||
|
if name.endswith(".rotary_pos_emb.inv_freq"):
|
||||||
|
return []
|
||||||
|
|
||||||
|
name = name.removeprefix("transformer.")
|
||||||
|
return [(self.map_tensor_name(name), data_torch)]
|
||||||
|
|
||||||
###### CONVERSION LOGIC ######
|
###### CONVERSION LOGIC ######
|
||||||
|
|
||||||
|
|
||||||
@ -2991,10 +3412,6 @@ def parse_args() -> argparse.Namespace:
"--vocab-only", action="store_true",
help="extract only the vocab",
)
parser.add_argument(
"--awq-path", type=Path, default=None,
help="Path to scale awq cache file",
)
parser.add_argument(
"--outfile", type=Path,
help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
@ -3072,19 +3489,6 @@ def main() -> None:

dir_model = args.model

if args.awq_path:
sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
from awq.apply_awq import add_scale_weights # type: ignore[import-not-found]
tmp_model_path = args.model / "weighted_model"
dir_model = tmp_model_path
if tmp_model_path.is_dir():
logger.info(f"{tmp_model_path} exists as a weighted model.")
else:
tmp_model_path.mkdir(parents=True, exist_ok=True)
logger.info("Saving new weighted model ...")
add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
logger.info(f"Saved weighted model at {tmp_model_path}.")

if not dir_model.is_dir():
logger.error(f'Error: {args.model} is not a directory')
sys.exit(1)
@ -3097,7 +3501,8 @@ def main() -> None:
"auto": gguf.LlamaFileType.GUESSED,
}

if args.use_temp_file and (args.split_max_tensors > 0 or args.split_max_size != "0"):
is_split = args.split_max_tensors > 0 or args.split_max_size != "0"
if args.use_temp_file and is_split:
logger.error("Error: Cannot use temp file when splitting")
sys.exit(1)

@ -3134,11 +3539,12 @@ def main() -> None:
if args.vocab_only:
logger.info("Exporting model vocab...")
model_instance.write_vocab()
logger.info("Model vocab successfully exported.")
logger.info(f"Model vocab successfully exported to {model_instance.fname_out}")
else:
logger.info("Exporting model...")
model_instance.write()
logger.info("Model successfully exported.")
out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out
logger.info(f"Model successfully exported to {out_path}")


if __name__ == '__main__':
@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-

# This script downloads the tokenizer models of the specified models from Huggingface and
# generates the get_vocab_base_pre() function for convert-hf-to-gguf.py
# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py
#
# This is necessary in order to analyze the type of pre-tokenizer used by the model and
# provide the necessary information to llama.cpp via the GGUF header in order to implement
@ -15,9 +15,9 @@
# - Add a new model to the "models" list
# - Run the script with your huggingface token:
#
# python3 convert-hf-to-gguf-update.py <huggingface_token>
# python3 convert_hf_to_gguf_update.py <huggingface_token>
#
# - Copy-paste the generated get_vocab_base_pre() function into convert-hf-to-gguf.py
# - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py
# - Update llama.cpp with the new pre-tokenizer if necessary
#
# TODO: generate tokenizer tests for llama.cpp
@ -37,7 +37,7 @@ from enum import IntEnum, auto
from transformers import AutoTokenizer

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("convert-hf-to-gguf-update")
logger = logging.getLogger("convert_hf_to_gguf_update")
sess = requests.Session()


@ -45,6 +45,7 @@ class TOKENIZER_TYPE(IntEnum):
SPM = auto()
BPE = auto()
WPM = auto()
UGM = auto()


# TODO: this string has to exercise as much pre-tokenizer functionality as possible
@ -55,10 +56,10 @@ if len(sys.argv) == 2:
token = sys.argv[1]
if not token.startswith("hf_"):
logger.info("Huggingface token seems invalid")
logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
sys.exit(1)
else:
logger.info("Usage: python convert-hf-to-gguf-update.py <huggingface_token>")
logger.info("Usage: python convert_hf_to_gguf_update.py <huggingface_token>")
sys.exit(1)

# TODO: add models here, base models preferred
@ -86,6 +87,10 @@ models = [
{"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", },
{"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
{"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Also used for Viking 13B and 33B
{"name": "gemma", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2b", },
{"name": "gemma-2", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2-9b", },
{"name": "jais", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/core42/jais-13b", },
{"name": "t5", "tokt": TOKENIZER_TYPE.UGM, "repo": "https://huggingface.co/google-t5/t5-small", },
]

@ -107,9 +112,13 @@ def download_model(model):
os.makedirs(f"models/tokenizers/{name}", exist_ok=True)

files = ["config.json", "tokenizer.json", "tokenizer_config.json"]

if tokt == TOKENIZER_TYPE.SPM:
files.append("tokenizer.model")

if tokt == TOKENIZER_TYPE.UGM:
files.append("spiece.model")

for file in files:
save_path = f"models/tokenizers/{name}/{file}"
if os.path.isfile(save_path):
@ -125,14 +134,14 @@ for model in models:
logger.error(f"Failed to download model {model['name']}. Error: {e}")


# generate the source code for the convert-hf-to-gguf.py:get_vocab_base_pre() function:
# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function:

src_ifs = ""
for model in models:
name = model["name"]
tokt = model["tokt"]

if tokt == TOKENIZER_TYPE.SPM:
if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM:
continue

# Skip if the tokenizer folder does not exist or there are other download issues previously
@ -142,7 +151,10 @@ for model in models:

# create the tokenizer
try:
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
if name == "t5":
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
else:
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
except OSError as e:
logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
continue # Skip to the next model if the tokenizer can't be loaded
@ -189,7 +201,7 @@ src_func = f"""

res = None

# NOTE: if you get an error here, you need to update the convert-hf-to-gguf-update.py script
# NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script
# or pull the latest version of the model from Huggingface
# don't edit the hashes manually!
{src_ifs}
@ -198,9 +210,9 @@ src_func = f"""
logger.warning("**************************************************************************************")
logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!")
logger.warning("** There are 2 possible reasons for this:")
logger.warning("** - the model has not been added to convert-hf-to-gguf-update.py yet")
logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet")
logger.warning("** - the pre-tokenization config has changed upstream")
logger.warning("** Check your model files and convert-hf-to-gguf-update.py and update them accordingly.")
logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.")
logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920")
logger.warning("**")
logger.warning(f"** chkhsh: {{chkhsh}}")
@ -214,7 +226,7 @@ src_func = f"""
return res
"""

convert_py_pth = pathlib.Path("convert-hf-to-gguf.py")
convert_py_pth = pathlib.Path("convert_hf_to_gguf.py")
convert_py = convert_py_pth.read_text(encoding="utf-8")
convert_py = re.sub(
r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
@ -225,7 +237,7 @@ convert_py = re.sub(

convert_py_pth.write_text(convert_py, encoding="utf-8")

logger.info("+++ convert-hf-to-gguf.py was updated")
logger.info("+++ convert_hf_to_gguf.py was updated")

# generate tests for each tokenizer model

@ -263,6 +275,7 @@ tests = [
"\n =",
"' era",
"Hello, y'all! How are you 😁 ?我想在apple工作1314151天~",
"!!!!!!",
"3",
"33",
"333",
@ -272,7 +285,8 @@ tests = [
"3333333",
"33333333",
"333333333",
# "Cửa Việt", # llama-bpe fails on this
"Cửa Việt", # llama-bpe fails on this
" discards",
chktxt,
]

@ -300,7 +314,10 @@ for model in models:

# create the tokenizer
try:
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
if name == "t5":
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False)
else:
tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
except OSError as e:
logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
continue # Skip this model and continue with the next one in the loop
@ -326,6 +343,6 @@ logger.info("\nRun the following commands to generate the vocab files for testin
for model in models:
name = model["name"]

print(f"python3 convert-hf-to-gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100
print(f"python3 convert_hf_to_gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100

logger.info("\n")
docs/android.md (new file, 56 lines)
@ -0,0 +1,56 @@
# Android

## Build on Android using Termux
[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required).
```
apt update && apt upgrade -y
apt install git make cmake
```

It's recommended to move your model inside the `~/` directory for best performance:
```
cd storage/downloads
mv model.gguf ~/
```

[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`.

## Building the Project using Android NDK
Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake.

Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux:
```
$ mkdir build-android
$ cd build-android
$ export NDK=<your_ndk_directory>
$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod ..
$ make
```

Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice).

Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission:

(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`)
```
$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/
$cd /data/data/com.termux/files/home/bin
$chmod +x ./*
```

Download model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), and push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/`

```
$mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/
```

Now, you can start chatting:
```
$cd /data/data/com.termux/files/home/bin
$./llama-cli -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml
```

Here's a demo of an interactive session running on Pixel 5 phone:

https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4
288
docs/build.md
Normal file
288
docs/build.md
Normal file
@ -0,0 +1,288 @@
|
|||||||
|
# Build llama.cpp locally

**To get the Code:**

```bash
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp
```

In order to build llama.cpp you have three different options.

- Using `make`:
  - On Linux or macOS:

    ```bash
    make
    ```

  - On Windows:

    1. Download the latest Fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
    2. Extract `w64devkit` on your PC.
    3. Run `w64devkit.exe`.
    4. Use the `cd` command to reach the `llama.cpp` folder.
    5. From here you can run:
        ```bash
        make
        ```

  - Notes:
    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
    - For faster repeated compilation, install [ccache](https://ccache.dev/).
    - For debug builds, run `make LLAMA_DEBUG=1`.

- Using `CMake`:

  ```bash
  cmake -B build
  cmake --build build --config Release
  ```

  **Notes**:

  - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
  - For faster repeated compilation, install [ccache](https://ccache.dev/).
  - For debug builds, there are two cases:

    1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):

       ```bash
       cmake -B build -DCMAKE_BUILD_TYPE=Debug
       cmake --build build
       ```

    2. Multi-config generators (`-G` param set to Visual Studio, Xcode...):

       ```bash
       cmake -B build -G "Xcode"
       cmake --build build --config Debug
       ```

- Using `gmake` (FreeBSD):

  1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics).
  2. Add your user to the **video** group.
  3. Install compilation dependencies.

     ```bash
     sudo pkg install gmake automake autoconf pkgconf llvm15 openblas

     gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4
     ```

## Metal Build

On macOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
To disable the Metal build at compile time use the `GGML_NO_METAL=1` flag or the `GGML_METAL=OFF` cmake option.

When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line argument.
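For example, a minimal sketch of both options (the model path is a placeholder):

```bash
# build with the Metal backend disabled
cmake -B build -DGGML_METAL=OFF
cmake --build build --config Release

# or keep the default Metal build but run inference fully on the CPU
./build/bin/llama-cli -m ./models/model.gguf -p "Hello" -n 32 -ngl 0
```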
## BLAS Build

Building the program with BLAS support may lead to some performance improvements in prompt processing when using batch sizes higher than 32 (the default is 512). CPU-only BLAS implementations don't affect normal generation performance; generation may improve with GPU-backed BLAS implementations such as cuBLAS or hipBLAS. There are currently several different BLAS implementations available for build and use:
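As a rough illustration of where this matters, the prompt-processing batch size can be set with the `-b` flag of `llama-cli` (model and prompt paths are placeholders):

```bash
# process the prompt in batches of 512 tokens (the default)
./build/bin/llama-cli -m ./models/model.gguf -f prompt.txt -b 512 -n 64
```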
### Accelerate Framework:

This is only available on Macs and it's enabled by default. You can just build using the normal instructions.

### OpenBLAS:

This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS installed on your machine.

- Using `make`:
  - On Linux:
    ```bash
    make GGML_OPENBLAS=1
    ```

  - On Windows:

    1. Download the latest Fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
    2. Download the latest version of [OpenBLAS for Windows](https://github.com/xianyi/OpenBLAS/releases).
    3. Extract `w64devkit` on your PC.
    4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, into `w64devkit\x86_64-w64-mingw32\lib`.
    5. From the same OpenBLAS zip copy the content of the `include` folder into `w64devkit\x86_64-w64-mingw32\include`.
    6. Run `w64devkit.exe`.
    7. Use the `cd` command to reach the `llama.cpp` folder.
    8. From here you can run:

        ```bash
        make GGML_OPENBLAS=1
        ```

- Using `CMake` on Linux:

    ```bash
    cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS
    cmake --build build --config Release
    ```

### BLIS

Check [BLIS.md](./backend/BLIS.md) for more information.

### SYCL

SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators.

llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU).

For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md).

### Intel oneMKL

Building through oneAPI compilers will make the avx_vnni instruction set available for Intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./backend/SYCL.md).

- Using manual oneAPI installation:
  By default, `GGML_BLAS_VENDOR` is set to `Generic`, so if you have already sourced the Intel environment script and pass `-DGGML_BLAS=ON` to cmake, the MKL version of BLAS will be selected automatically. Otherwise please install oneAPI and follow the steps below:
  ```bash
  source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation
  cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON
  cmake --build build --config Release
  ```

- Using oneAPI docker image:
  If you do not want to source the environment vars and install oneAPI manually, you can also build the code using the Intel docker container [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above.

Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information.

### CUDA

This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).

For Jetson users: if you have a Jetson Orin, you can try this: [Official Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an older model (Nano/TX2), some additional steps are needed before compiling.

- Using `make`:
  ```bash
  make GGML_CUDA=1
  ```
- Using `CMake`:

  ```bash
  cmake -B build -DGGML_CUDA=ON
  cmake --build build --config Release
  ```

The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance:

| Option | Legal values | Default | Description |
|--------|--------------|---------|-------------|
| GGML_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
| GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
| GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. |
| GGML_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models. |
| GGML_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
| GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
| GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
| GGML_CUDA_FA_ALL_QUANTS | Boolean | false | Compile support for all KV cache quantization type (combinations) for the FlashAttention CUDA kernels. More fine-grained control over KV cache size but compilation takes much longer. |
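For example, a minimal sketch combining one of these options with device selection (the model path is a placeholder):

```bash
# build with half-precision dequantization / mul-mat-vec kernels enabled
cmake -B build -DGGML_CUDA=ON -DGGML_CUDA_F16=ON
cmake --build build --config Release

# run on the first GPU only, offloading all layers
CUDA_VISIBLE_DEVICES=0 ./build/bin/llama-cli -m ./models/model.gguf -p "Hello" -n 32 -ngl 99
```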
### hipBLAS

This provides BLAS acceleration on HIP-supported AMD GPUs.
Make sure to have ROCm installed.
You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html#rocm-install-quick).

- Using `make`:
  ```bash
  make GGML_HIPBLAS=1
  ```
- Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU):
  ```bash
  HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \
      cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
      && cmake --build build --config Release -- -j 16
  ```
  On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`.
  However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs).

  Note that if you get the following error:
  ```
  clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library
  ```
  try searching for a directory under `HIP_PATH` that contains the file
  `oclc_abi_version_400.bc`. Then, add the following to the start of the
  command: `HIP_DEVICE_LIB_PATH=<directory-you-just-found>`, so something
  like:
  ```bash
  HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \
  HIP_DEVICE_LIB_PATH=<directory-you-just-found> \
      cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \
      && cmake --build build -- -j 16
  ```

- Using `make` (example for target gfx1030, build with 16 CPU threads):
  ```bash
  make -j16 GGML_HIPBLAS=1 GGML_HIP_UMA=1 AMDGPU_TARGETS=gfx1030
  ```

- Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU):
  ```bash
  set PATH=%HIP_PATH%\bin;%PATH%
  cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DGGML_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release
  cmake --build build
  ```
  Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100`, which corresponds to the Radeon RX 7900XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors).
  Find your GPU version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`.

The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used.
If your GPU is not officially supported you can use the environment variable `HSA_OVERRIDE_GFX_VERSION` set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3.
The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because they use the same code as the cuBLAS version above):

| Option | Legal values | Default | Description |
|--------|--------------|---------|-------------|
| GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the HIP dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
| GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the HIP mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. |
| GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per HIP thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
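For example, a sketch of running on the first ROCm device while overriding the GFX version for an unsupported RDNA2 card (the model path and the version value are placeholders; pick the value that matches your GPU family as described above):

```bash
HIP_VISIBLE_DEVICES=0 HSA_OVERRIDE_GFX_VERSION=10.3.0 \
    ./build/bin/llama-cli -m ./models/model.gguf -p "Hello" -n 32 -ngl 99
```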
### Vulkan

**With docker**:

You don't need to install the Vulkan SDK. It will be installed inside the container.

```sh
# Build the image
docker build -t llama-cpp-vulkan -f .devops/llama-cli-vulkan.Dockerfile .

# Then, use it:
docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-vulkan -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33
```

**Without docker**:

Firstly, you need to make sure you have installed the [Vulkan SDK](https://vulkan.lunarg.com/doc/view/latest/linux/getting_started_ubuntu.html).

For example, on Ubuntu 22.04 (jammy), use the commands below:

```bash
wget -qO - https://packages.lunarg.com/lunarg-signing-key-pub.asc | apt-key add -
wget -qO /etc/apt/sources.list.d/lunarg-vulkan-jammy.list https://packages.lunarg.com/vulkan/lunarg-vulkan-jammy.list
apt update -y
apt-get install -y vulkan-sdk
# To verify the installation, use the command below:
vulkaninfo
```

Alternatively your package manager might be able to provide the appropriate libraries.
For example for Ubuntu 22.04 you can install `libvulkan-dev` instead.
For Fedora 40, you can install the `vulkan-devel`, `glslc` and `glslang` packages.

Then, build llama.cpp using the cmake commands below:

```bash
cmake -B build -DGGML_VULKAN=1
cmake --build build --config Release
# Test the output binary (with "-ngl 33" to offload all layers to GPU)
./bin/llama-cli -m "PATH_TO_MODEL" -p "Hi you how are you" -n 50 -e -ngl 33 -t 4

# You should see ggml_vulkan detecting your GPU in the output. For example:
# ggml_vulkan: Using Intel(R) Graphics (ADL GT2) | uma: 1 | fp16: 1 | warp size: 32
```

### Android

To read documentation for how to build on Android, [click here](./android.md)
@ -1,4 +1,4 @@
## Add a new model architecture to `llama.cpp`
# Add a new model architecture to `llama.cpp`

Adding a model requires a few steps:

@ -17,7 +17,7 @@ Also, it is important to check that the examples and main ggml backends (CUDA, M
### 1. Convert the model to GGUF

This step is done in python with a `convert` script using the [gguf](https://pypi.org/project/gguf/) library.
Depending on the model architecture, you can use either [convert-hf-to-gguf.py](../convert-hf-to-gguf.py) or [examples/convert-legacy-llama.py](../examples/convert-legacy-llama.py) (for `llama/llama2` models in `.pth` format).
Depending on the model architecture, you can use either [convert_hf_to_gguf.py](../convert_hf_to_gguf.py) or [examples/convert_legacy_llama.py](../examples/convert_legacy_llama.py) (for `llama/llama2` models in `.pth` format).
The convert script reads the model configuration, tokenizer, tensor names+data and converts them to GGUF metadata and tensors.
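As a rough usage sketch of that conversion step (the model directory, output path and `--outtype` value are placeholders; check the script's `--help` for the current options):

```bash
# convert a Hugging Face model directory to a GGUF file
python convert_hf_to_gguf.py ./models/my-hf-model --outfile ./models/my-model-f16.gguf --outtype f16
```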
86 docs/docker.md (Normal file)
@ -0,0 +1,86 @@

# Docker

## Prerequisites
* Docker must be installed and running on your system.
* Create a folder to store big models & intermediate files (ex. /llama/models)

## Images
We have three Docker images available for this project:

1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization. (platforms: `linux/amd64`, `linux/arm64`)
2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)

Additionally, there are the following images, similar to the above:

- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)

The GPU enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library), you'll need to build the images locally for now.

## Usage

The easiest way to download the models, convert them to ggml and optimize them is with the `--all-in-one` command of the full Docker image.

Replace `/path/to/models` below with the actual path where you downloaded the models.

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
```

On completion, you are ready to play!

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
```

or with a light image:

```bash
docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
```

or with a server image:

```bash
docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
```

## Docker With CUDA

Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) properly installed on Linux, or is using a GPU enabled cloud, `cuBLAS` should be accessible inside the container.

## Building Docker locally

```bash
docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile .
docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile .
```

You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture.

The defaults are:

- `CUDA_VERSION` set to `11.7.1`
- `CUDA_DOCKER_ARCH` set to `all`
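For example, a hedged sketch of overriding those build arguments via `--build-arg` (the version and architecture values here are illustrative placeholders, not tested defaults):

```bash
docker build -t local/llama.cpp:light-cuda \
  --build-arg CUDA_VERSION=12.2.0 \
  --build-arg CUDA_DOCKER_ARCH=sm_86 \
  -f .devops/llama-cli-cuda.Dockerfile .
```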
The resulting images are essentially the same as the non-CUDA images:

1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and convert into 4-bit quantization.
2. `local/llama.cpp:light-cuda`: This image only includes the main executable file.
3. `local/llama.cpp:server-cuda`: This image only includes the server executable file.

## Usage

After building locally, usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.

```bash
docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
```
39 docs/install.md (Normal file)
@ -0,0 +1,39 @@

# Install pre-built version of llama.cpp

## Homebrew

On Mac and Linux, the Homebrew package manager can be used via

```sh
brew install llama.cpp
```
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
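Since the formula tracks upstream releases, updating later is just the usual Homebrew flow:

```sh
brew update
brew upgrade llama.cpp
```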
## Nix

On Mac and Linux, the Nix package manager can be used via

```sh
nix profile install nixpkgs#llama-cpp
```
for flake-enabled installs.

Or

```sh
nix-env --file '<nixpkgs>' --install --attr llama-cpp
```

for non-flake-enabled installs.

This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).

## Flox

On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via

```sh
flox install llama-cpp
```

Flox follows the nixpkgs build of llama.cpp.
@ -23,6 +23,7 @@ else()
    add_subdirectory(export-lora)
    add_subdirectory(finetune)
    add_subdirectory(gbnf-validator)
    add_subdirectory(gguf-hash)
    add_subdirectory(gguf-split)
    add_subdirectory(gguf)
    add_subdirectory(gritlm)
@ -229,7 +229,7 @@ private func tokenize(text: String, add_bos: Bool) -> [llama_token] {

private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String? {
    var result = [CChar](repeating: 0, count: 8)
    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), false)
    let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count), 0, false)
    if nTokens < 0 {
        let actualTokensCount = -Int(nTokens)
        result = .init(repeating: 0, count: actualTokensCount)
@ -238,6 +238,7 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
            token,
            &result,
            Int32(result.count),
            0,
            false
        )
        assert(check == actualTokensCount)
@ -93,14 +93,34 @@ int main(int argc, char ** argv) {

    // create a llama_batch
    // we use this object to submit token data for decoding
    llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t)n_parallel), 0, 1);
    llama_batch batch = llama_batch_init(std::max(tokens_list.size(), (size_t) n_parallel), 0, n_parallel);

    std::vector<llama_seq_id> seq_ids(n_parallel, 0);
    for (int32_t i = 0; i < n_parallel; ++i) {
        seq_ids[i] = i;
    }

    // evaluate the initial prompt
    for (size_t i = 0; i < tokens_list.size(); ++i) {
        llama_batch_add(batch, tokens_list[i], i, { 0 }, false);
        llama_batch_add(batch, tokens_list[i], i, seq_ids, false);
    }
    GGML_ASSERT(batch.n_tokens == (int) tokens_list.size());

    if (llama_model_has_encoder(model)) {
        if (llama_encode(ctx, batch)) {
            LOG_TEE("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == -1) {
            decoder_start_token_id = llama_token_bos(model);
        }

        llama_batch_clear(batch);
        llama_batch_add(batch, decoder_start_token_id, 0, seq_ids, false);
    }

    // llama_decode will output logits only for the last token of the prompt
    batch.logits[batch.n_tokens - 1] = true;

@ -109,11 +129,11 @@ int main(int argc, char ** argv) {
        return 1;
    }

    // assign the system KV cache to all parallel sequences
    //// assign the system KV cache to all parallel sequences
    // this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
    //// this way, the parallel sequences will "reuse" the prompt tokens without having to copy them
    for (int32_t i = 1; i < n_parallel; ++i) {
    //for (int32_t i = 1; i < n_parallel; ++i) {
        llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    //    llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
    }
    //}

    if (n_parallel > 1) {
        LOG_TEE("\n\n%s: generating %d sequences ...\n", __func__, n_parallel);
@ -58,4 +58,3 @@ The above command will output space-separated float values.

```powershell
embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
```

@ -87,4 +87,4 @@ The LORA rank can be configured for each model tensor type separately with these

The LORA rank of 'norm' tensors should always be 1.

To see all available options use `finetune --help`.
To see all available options use `llama-finetune --help`.

@ -8,7 +8,7 @@ if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi
if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi

# MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2-q8_0.gguf" # This is the model the readme uses.
MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "main --lora" with GPU inferencing.
MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "llama-cli --lora" with GPU inferencing.

while getopts "dg" opt; do
    case $opt in
15 examples/gguf-hash/CMakeLists.txt (Normal file)
@ -0,0 +1,15 @@

set(TARGET llama-gguf-hash)
add_executable(${TARGET} gguf-hash.cpp)
install(TARGETS ${TARGET} RUNTIME)

# clibs dependencies
include_directories(deps/)
add_library(xxhash OBJECT deps/xxhash/xxhash.c deps/xxhash/xxhash.h)
target_link_libraries(${TARGET} PRIVATE xxhash)
add_library(sha1 OBJECT deps/sha1/sha1.c deps/sha1/sha1.h)
target_link_libraries(${TARGET} PRIVATE sha1)
add_library(sha256 OBJECT deps/sha256/sha256.c deps/sha256/sha256.h)
target_link_libraries(${TARGET} PRIVATE sha256)

target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
206 examples/gguf-hash/README.md (Normal file)
@ -0,0 +1,206 @@

# llama-gguf-hash

CLI to hash GGUF files to detect differences on a per-model and per-tensor level.

**Command line options:**

- `--help`: display help message
- `--xxh64`: use xxhash 64-bit hash mode (default)
- `--sha1`: use sha1
- `--uuid`: generate a UUIDv5 ID
- `--sha256`: use sha256
- `--all`: use all hashes
- `--no-layer`: exclude the per-layer hashes
- `-c`, `--check <manifest>`: verify against a manifest
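For example, a quick sketch combining a few of these flags (the file name is a placeholder):

```bash
# hash only the whole-model tensor payload with the default xxh64 mode
./llama-gguf-hash --xxh64 --no-layer model.gguf

# write a manifest with every hash type, then verify the file against it
./llama-gguf-hash --all model.gguf > model.gguf.manifest
./llama-gguf-hash --check model.gguf.manifest model.gguf
```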
## About

While most POSIX systems already have hash checking programs like sha256sum, those
are designed to check entire files. This is not ideal for our purpose if we want
to check for consistency of the tensor data even if the metadata content of the
gguf KV store has been updated.

This program is designed to hash a gguf tensor payload on a 'per tensor layer'
basis in addition to an 'entire tensor model' hash. The intent is that the whole-model
hash can be checked first; if any inconsistency is detected,
the per-tensor hashes can then be used to narrow down the specific tensor layer
that has inconsistencies.

For Maintainers:
- Detection of tensor inconsistency during development and automated tests
    - This is served by xxh64 which is fast
    - This is also served by having per tensor layer hashes to assist in narrowing down
      the location of the faulty tensor layer
    - This is also served by sha1 which is much slower but more widely supported

For Model Creators:
- Optional consistent UUID generation based on model tensor content
    - This is served by UUIDv5 which is useful for database keys
    - llama.cpp UUIDv5 Namespace: `ef001206-dadc-5f6d-a15f-3359e577d4e5`
        - Made via UUIDv5 URL namespace of `en.wikipedia.org/wiki/Llama.cpp`

For Model Users:
- Assurance of tensor layer integrity even if metadata was updated
    - This is served by sha256 which is still considered very secure as of 2024

### Design Note

- The default behavior of this program if no arguments is provided is to hash
  using xxhash's xxh64 mode because it is very fast and is primarily targeted
  towards maintainers who may want to use this in automated tests.
- xxhash supports xxh32 and xxh128 for 32-bit and 128-bit hashes respectively;
  however we picked the 64-bit xxhash as most computers are 64-bit as of 2024 and thus
  have a better affinity for calculating a hash that is 64 bits in size.

## Compile Example

```bash
cmake -B build -DCMAKE_BUILD_TYPE=Debug -DLLAMA_FATAL_WARNINGS=ON
make -C build clean
make -C build llama-gguf-hash VERBOSE=1
./build/bin/llama-gguf-hash test.gguf
./build/bin/llama-gguf-hash --xxh64 test.gguf
./build/bin/llama-gguf-hash --sha1 test.gguf
./build/bin/llama-gguf-hash --uuid test.gguf
./build/bin/llama-gguf-hash --sha256 test.gguf
```

## Generation and Verification Example

To generate we may use this command:

```bash
./llama-gguf-hash --all test.gguf > test.gguf.manifest
```

This would generate a manifest that looks like the one below, which contains multiple hash types and per-tensor-layer hashes as well
(this excludes UUID as that is an ID, not a hash).

```bash
|
||||||
|
xxh64 f66e9cd66a4396a0 test.gguf:tensor_0
|
||||||
|
sha1 59f79ecefd8125a996fdf419239051a7e99e5f20 test.gguf:tensor_0
|
||||||
|
sha256 c0510d38fa060c46265e0160a85c7243096b01dd31c2f355bdbb5516b20de1bd test.gguf:tensor_0
|
||||||
|
xxh64 7d3a1f9ac04d0537 test.gguf:tensor_1
|
||||||
|
sha1 4765f592eacf096df4628ba59476af94d767080a test.gguf:tensor_1
|
||||||
|
sha256 8514cbcc73692a2c56bd7a33a022edd5ff819614bd23b19915d7224387f397a7 test.gguf:tensor_1
|
||||||
|
xxh64 a0af5d700049693b test.gguf:tensor_2
|
||||||
|
sha1 25cbfbad4513cc348e2c95ebdee69d6ff2fd8753 test.gguf:tensor_2
|
||||||
|
sha256 947e6b36e20f2cc95e1d2ce1c1669d813d574657ac6b5ac5196158d454d35180 test.gguf:tensor_2
|
||||||
|
xxh64 e83fddf559d7b6a6 test.gguf:tensor_3
|
||||||
|
sha1 a9cba73e2d90f2ee3dae2548caa42bef3fe6a96c test.gguf:tensor_3
|
||||||
|
sha256 423b044e016d8ac73c39f23f60bf01bedef5ecb03c0230accd824c91fe86f1a1 test.gguf:tensor_3
|
||||||
|
xxh64 1257733306b7992d test.gguf:tensor_4
|
||||||
|
sha1 d7bc61db93bb685ce9d598da89717c66729b7543 test.gguf:tensor_4
|
||||||
|
sha256 79737cb3912d4201384cf7f16a1a37ff7823f23ea796cb205b6ca361ab9e3ebf test.gguf:tensor_4
|
||||||
|
xxh64 d238d16ba4711e58 test.gguf:tensor_5
|
||||||
|
sha1 0706566c198fe1072f37e0a5135b4b5f23654c52 test.gguf:tensor_5
|
||||||
|
sha256 60949be8298eced0ecdde64487643d018407bd261691e061d9e9c3dbc9fd358b test.gguf:tensor_5
|
||||||
|
xxh64 3fbc3b65ab8c7f39 test.gguf:tensor_6
|
||||||
|
sha1 73922a0727226a409049f6fc3172a52219ca6f00 test.gguf:tensor_6
|
||||||
|
sha256 574f4c46ff384a3b9a225eb955d2a871847a2e8b3fa59387a8252832e92ef7b0 test.gguf:tensor_6
|
||||||
|
xxh64 c22021c29854f093 test.gguf:tensor_7
|
||||||
|
sha1 efc39cece6a951188fc41e354c73bbfe6813d447 test.gguf:tensor_7
|
||||||
|
sha256 4c0410cd3c500f078ae5b21e8dc9eb79e29112713b2ab58a882f82a3868d4d75 test.gguf:tensor_7
|
||||||
|
xxh64 936df61f5d64261f test.gguf:tensor_8
|
||||||
|
sha1 c2490296d789a4f34398a337fed8377d943d9f06 test.gguf:tensor_8
|
||||||
|
sha256 c4401313feeba0261275c3b25bd2d8fe40ce04e0f440c2980ed0e9674c30ff01 test.gguf:tensor_8
|
||||||
|
xxh64 93fd20c64421c081 test.gguf:tensor_9
|
||||||
|
sha1 7047ce1e78437a6884337a3751c7ee0421918a65 test.gguf:tensor_9
|
||||||
|
sha256 23d57cf0d7a6e90b0b3616b41300e0cd354781e812add854a5f95aa55f2bc514 test.gguf:tensor_9
|
||||||
|
xxh64 5a54d3aad816f302 test.gguf
|
||||||
|
sha1 d15be52c4ff213e823cb6dd13af7ee2f978e7042 test.gguf
|
||||||
|
sha256 7dd641b32f59b60dbd4b5420c4b0f6321ccf48f58f6ae201a3dbc4a58a27c6e4 test.gguf
|
||||||
|
```
|
||||||
|
|
||||||
|
We can then use the normal check command, which will by default verify against the strongest hash type found in the manifest:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ ./llama-gguf-hash --check test.gguf.manifest test.gguf
|
||||||
|
manifest test.gguf.manifest sha256 sha1 xxh64
|
||||||
|
sha256 c0510d38fa060c46265e0160a85c7243096b01dd31c2f355bdbb5516b20de1bd test.gguf:tensor_0 - Ok
|
||||||
|
sha256 8514cbcc73692a2c56bd7a33a022edd5ff819614bd23b19915d7224387f397a7 test.gguf:tensor_1 - Ok
|
||||||
|
sha256 947e6b36e20f2cc95e1d2ce1c1669d813d574657ac6b5ac5196158d454d35180 test.gguf:tensor_2 - Ok
|
||||||
|
sha256 423b044e016d8ac73c39f23f60bf01bedef5ecb03c0230accd824c91fe86f1a1 test.gguf:tensor_3 - Ok
|
||||||
|
sha256 79737cb3912d4201384cf7f16a1a37ff7823f23ea796cb205b6ca361ab9e3ebf test.gguf:tensor_4 - Ok
|
||||||
|
sha256 60949be8298eced0ecdde64487643d018407bd261691e061d9e9c3dbc9fd358b test.gguf:tensor_5 - Ok
|
||||||
|
sha256 574f4c46ff384a3b9a225eb955d2a871847a2e8b3fa59387a8252832e92ef7b0 test.gguf:tensor_6 - Ok
|
||||||
|
sha256 4c0410cd3c500f078ae5b21e8dc9eb79e29112713b2ab58a882f82a3868d4d75 test.gguf:tensor_7 - Ok
|
||||||
|
sha256 c4401313feeba0261275c3b25bd2d8fe40ce04e0f440c2980ed0e9674c30ff01 test.gguf:tensor_8 - Ok
|
||||||
|
sha256 23d57cf0d7a6e90b0b3616b41300e0cd354781e812add854a5f95aa55f2bc514 test.gguf:tensor_9 - Ok
|
||||||
|
sha256 7dd641b32f59b60dbd4b5420c4b0f6321ccf48f58f6ae201a3dbc4a58a27c6e4 test.gguf - Ok
|
||||||
|
|
||||||
|
Verification results for test.gguf.manifest - Success
|
||||||
|
```
|
||||||
|
|
||||||
|
Or we may explicitly ask for a faster hash like:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$ ./llama-gguf-hash --check test.gguf.manifest --xxh64 test.gguf
|
||||||
|
manifest test.gguf.manifest sha256 sha1 xxh64
|
||||||
|
xxh64 f66e9cd66a4396a0 test.gguf:tensor_0 - Ok
|
||||||
|
xxh64 7d3a1f9ac04d0537 test.gguf:tensor_1 - Ok
|
||||||
|
xxh64 a0af5d700049693b test.gguf:tensor_2 - Ok
|
||||||
|
xxh64 e83fddf559d7b6a6 test.gguf:tensor_3 - Ok
|
||||||
|
xxh64 1257733306b7992d test.gguf:tensor_4 - Ok
|
||||||
|
xxh64 d238d16ba4711e58 test.gguf:tensor_5 - Ok
|
||||||
|
xxh64 3fbc3b65ab8c7f39 test.gguf:tensor_6 - Ok
|
||||||
|
xxh64 c22021c29854f093 test.gguf:tensor_7 - Ok
|
||||||
|
xxh64 936df61f5d64261f test.gguf:tensor_8 - Ok
|
||||||
|
xxh64 93fd20c64421c081 test.gguf:tensor_9 - Ok
|
||||||
|
xxh64 5a54d3aad816f302 test.gguf - Ok
|
||||||
|
|
||||||
|
Verification results for test.gguf.manifest - Success
|
||||||
|
```
|
||||||
|
|
||||||
|
Or maybe we want to just check that all the hashes are valid:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
$./llama-gguf-hash --check test.gguf.manifest --all test.gguf.manifest
|
||||||
|
manifest test.gguf.manifest sha256 sha1 xxh64
|
||||||
|
xxh64 f66e9cd66a4396a0 test.gguf:tensor_0 - Ok
|
||||||
|
sha1 59f79ecefd8125a996fdf419239051a7e99e5f20 test.gguf:tensor_0 - Ok
|
||||||
|
sha256 c0510d38fa060c46265e0160a85c7243096b01dd31c2f355bdbb5516b20de1bd test.gguf:tensor_0 - Ok
|
||||||
|
xxh64 7d3a1f9ac04d0537 test.gguf:tensor_1 - Ok
|
||||||
|
sha1 4765f592eacf096df4628ba59476af94d767080a test.gguf:tensor_1 - Ok
|
||||||
|
sha256 8514cbcc73692a2c56bd7a33a022edd5ff819614bd23b19915d7224387f397a7 test.gguf:tensor_1 - Ok
|
||||||
|
xxh64 a0af5d700049693b test.gguf:tensor_2 - Ok
|
||||||
|
sha1 25cbfbad4513cc348e2c95ebdee69d6ff2fd8753 test.gguf:tensor_2 - Ok
|
||||||
|
sha256 947e6b36e20f2cc95e1d2ce1c1669d813d574657ac6b5ac5196158d454d35180 test.gguf:tensor_2 - Ok
|
||||||
|
xxh64 e83fddf559d7b6a6 test.gguf:tensor_3 - Ok
|
||||||
|
sha1 a9cba73e2d90f2ee3dae2548caa42bef3fe6a96c test.gguf:tensor_3 - Ok
|
||||||
|
sha256 423b044e016d8ac73c39f23f60bf01bedef5ecb03c0230accd824c91fe86f1a1 test.gguf:tensor_3 - Ok
|
||||||
|
xxh64 1257733306b7992d test.gguf:tensor_4 - Ok
|
||||||
|
sha1 d7bc61db93bb685ce9d598da89717c66729b7543 test.gguf:tensor_4 - Ok
|
||||||
|
sha256 79737cb3912d4201384cf7f16a1a37ff7823f23ea796cb205b6ca361ab9e3ebf test.gguf:tensor_4 - Ok
|
||||||
|
xxh64 d238d16ba4711e58 test.gguf:tensor_5 - Ok
|
||||||
|
sha1 0706566c198fe1072f37e0a5135b4b5f23654c52 test.gguf:tensor_5 - Ok
|
||||||
|
sha256 60949be8298eced0ecdde64487643d018407bd261691e061d9e9c3dbc9fd358b test.gguf:tensor_5 - Ok
|
||||||
|
xxh64 3fbc3b65ab8c7f39 test.gguf:tensor_6 - Ok
|
||||||
|
sha1 73922a0727226a409049f6fc3172a52219ca6f00 test.gguf:tensor_6 - Ok
|
||||||
|
sha256 574f4c46ff384a3b9a225eb955d2a871847a2e8b3fa59387a8252832e92ef7b0 test.gguf:tensor_6 - Ok
|
||||||
|
xxh64 c22021c29854f093 test.gguf:tensor_7 - Ok
|
||||||
|
sha1 efc39cece6a951188fc41e354c73bbfe6813d447 test.gguf:tensor_7 - Ok
|
||||||
|
sha256 4c0410cd3c500f078ae5b21e8dc9eb79e29112713b2ab58a882f82a3868d4d75 test.gguf:tensor_7 - Ok
|
||||||
|
xxh64 936df61f5d64261f test.gguf:tensor_8 - Ok
|
||||||
|
sha1 c2490296d789a4f34398a337fed8377d943d9f06 test.gguf:tensor_8 - Ok
|
||||||
|
sha256 c4401313feeba0261275c3b25bd2d8fe40ce04e0f440c2980ed0e9674c30ff01 test.gguf:tensor_8 - Ok
|
||||||
|
xxh64 93fd20c64421c081 test.gguf:tensor_9 - Ok
|
||||||
|
sha1 7047ce1e78437a6884337a3751c7ee0421918a65 test.gguf:tensor_9 - Ok
|
||||||
|
sha256 23d57cf0d7a6e90b0b3616b41300e0cd354781e812add854a5f95aa55f2bc514 test.gguf:tensor_9 - Ok
|
||||||
|
xxh64 5a54d3aad816f302 test.gguf - Ok
|
||||||
|
sha1 d15be52c4ff213e823cb6dd13af7ee2f978e7042 test.gguf - Ok
|
||||||
|
sha256 7dd641b32f59b60dbd4b5420c4b0f6321ccf48f58f6ae201a3dbc4a58a27c6e4 test.gguf - Ok
|
||||||
|
|
||||||
|
Verification results for test.gguf.manifest - Success
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Crypto/Hash Libraries Used

These micro C library dependencies were installed via the [clib C package manager](https://github.com/clibs):

- https://github.com/mofosyne/xxHash (From: https://github.com/Cyan4973/xxHash)
- https://github.com/clibs/sha1/
- https://github.com/jb55/sha256.c
13 examples/gguf-hash/deps/rotate-bits/package.json (Normal file)
@ -0,0 +1,13 @@
{
|
||||||
|
"name": "rotate-bits",
|
||||||
|
"version": "0.1.1",
|
||||||
|
"repo": "jb55/rotate-bits.h",
|
||||||
|
"description": "rotate bits",
|
||||||
|
"keywords": ["rotl", "rotr"],
|
||||||
|
"src": ["rotate-bits.h"],
|
||||||
|
"license": "Public Domain",
|
||||||
|
"development": {
|
||||||
|
"thlorenz/tap.c": "*"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
46 examples/gguf-hash/deps/rotate-bits/rotate-bits.h (Normal file)
@ -0,0 +1,46 @@
|
||||||
|
|
||||||
|
#ifndef __ROTATE_DEFS_H
|
||||||
|
#define __ROTATE_DEFS_H
|
||||||
|
|
||||||
|
#ifdef _MSC_VER
|
||||||
|
|
||||||
|
#include <stdlib.h>
|
||||||
|
|
||||||
|
#define ROTL32(v, n) _rotl((v), (n))
|
||||||
|
#define ROTL64(v, n) _rotl64((v), (n))
|
||||||
|
|
||||||
|
#define ROTR32(v, n) _rotr((v), (n))
|
||||||
|
#define ROTR64(v, n) _rotr64((v), (n))
|
||||||
|
|
||||||
|
#else
|
||||||
|
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#define U8V(v) ((uint8_t)(v) & 0xFFU)
|
||||||
|
#define U16V(v) ((uint16_t)(v) & 0xFFFFU)
|
||||||
|
#define U32V(v) ((uint32_t)(v) & 0xFFFFFFFFU)
|
||||||
|
#define U64V(v) ((uint64_t)(v) & 0xFFFFFFFFFFFFFFFFU)
|
||||||
|
|
||||||
|
#define ROTL32(v, n) \
|
||||||
|
(U32V((uint32_t)(v) << (n)) | ((uint32_t)(v) >> (32 - (n))))
|
||||||
|
|
||||||
|
// tests fail if we don't have this cast...
|
||||||
|
#define ROTL64(v, n) \
|
||||||
|
(U64V((uint64_t)(v) << (n)) | ((uint64_t)(v) >> (64 - (n))))
|
||||||
|
|
||||||
|
#define ROTR32(v, n) ROTL32(v, 32 - (n))
|
||||||
|
#define ROTR64(v, n) ROTL64(v, 64 - (n))
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define ROTL8(v, n) \
|
||||||
|
(U8V((uint8_t)(v) << (n)) | ((uint8_t)(v) >> (8 - (n))))
|
||||||
|
|
||||||
|
#define ROTL16(v, n) \
|
||||||
|
(U16V((uint16_t)(v) << (n)) | ((uint16_t)(v) >> (16 - (n))))
|
||||||
|
|
||||||
|
#define ROTR8(v, n) ROTL8(v, 8 - (n))
|
||||||
|
#define ROTR16(v, n) ROTL16(v, 16 - (n))
|
||||||
|
|
||||||
|
#endif
|
9 examples/gguf-hash/deps/sha1/package.json (Normal file)
@ -0,0 +1,9 @@
{
|
||||||
|
"name": "sha1",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"repo": "clibs/sha1",
|
||||||
|
"description": "sha1 hash algorithm",
|
||||||
|
"keywords": ["sha1", "hash"],
|
||||||
|
"license": "public domain",
|
||||||
|
"src": ["sha1.c", "sha1.h"]
|
||||||
|
}
|
295 examples/gguf-hash/deps/sha1/sha1.c (Normal file)
@ -0,0 +1,295 @@
/*
|
||||||
|
SHA-1 in C
|
||||||
|
By Steve Reid <steve@edmweb.com>
|
||||||
|
100% Public Domain
|
||||||
|
|
||||||
|
Test Vectors (from FIPS PUB 180-1)
|
||||||
|
"abc"
|
||||||
|
A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
|
||||||
|
"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
|
||||||
|
84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
|
||||||
|
A million repetitions of "a"
|
||||||
|
34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* #define LITTLE_ENDIAN * This should be #define'd already, if true. */
|
||||||
|
/* #define SHA1HANDSOFF * Copies data before messing with it. */
|
||||||
|
|
||||||
|
#define SHA1HANDSOFF
|
||||||
|
|
||||||
|
#include <stdio.h>
|
||||||
|
#include <string.h>
|
||||||
|
|
||||||
|
/* for uint32_t */
|
||||||
|
#include <stdint.h>
|
||||||
|
|
||||||
|
#include "sha1.h"
|
||||||
|
|
||||||
|
|
||||||
|
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
|
||||||
|
|
||||||
|
/* blk0() and blk() perform the initial expand. */
|
||||||
|
/* I got the idea of expanding during the round function from SSLeay */
|
||||||
|
#if BYTE_ORDER == LITTLE_ENDIAN
|
||||||
|
#define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
|
||||||
|
|(rol(block->l[i],8)&0x00FF00FF))
|
||||||
|
#elif BYTE_ORDER == BIG_ENDIAN
|
||||||
|
#define blk0(i) block->l[i]
|
||||||
|
#else
|
||||||
|
#error "Endianness not defined!"
|
||||||
|
#endif
|
||||||
|
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
|
||||||
|
^block->l[(i+2)&15]^block->l[i&15],1))
|
||||||
|
|
||||||
|
/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
|
||||||
|
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
|
||||||
|
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
|
||||||
|
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
|
||||||
|
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
|
||||||
|
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
|
||||||
|
|
||||||
|
|
||||||
|
/* Hash a single 512-bit block. This is the core of the algorithm. */
|
||||||
|
|
||||||
|
void SHA1Transform(
|
||||||
|
uint32_t state[5],
|
||||||
|
const unsigned char buffer[64]
|
||||||
|
)
|
||||||
|
{
|
||||||
|
uint32_t a, b, c, d, e;
|
||||||
|
|
||||||
|
typedef union
|
||||||
|
{
|
||||||
|
unsigned char c[64];
|
||||||
|
uint32_t l[16];
|
||||||
|
} CHAR64LONG16;
|
||||||
|
|
||||||
|
#ifdef SHA1HANDSOFF
|
||||||
|
CHAR64LONG16 block[1]; /* use array to appear as a pointer */
|
||||||
|
|
||||||
|
memcpy(block, buffer, 64);
|
||||||
|
#else
|
||||||
|
/* The following had better never be used because it causes the
|
||||||
|
* pointer-to-const buffer to be cast into a pointer to non-const.
|
||||||
|
* And the result is written through. I threw a "const" in, hoping
|
||||||
|
* this will cause a diagnostic.
|
||||||
|
*/
|
||||||
|
CHAR64LONG16 *block = (const CHAR64LONG16 *) buffer;
|
||||||
|
#endif
|
||||||
|
/* Copy context->state[] to working vars */
|
||||||
|
a = state[0];
|
||||||
|
b = state[1];
|
||||||
|
c = state[2];
|
||||||
|
d = state[3];
|
||||||
|
e = state[4];
|
||||||
|
/* 4 rounds of 20 operations each. Loop unrolled. */
|
||||||
|
R0(a, b, c, d, e, 0);
|
||||||
|
R0(e, a, b, c, d, 1);
|
||||||
|
R0(d, e, a, b, c, 2);
|
||||||
|
R0(c, d, e, a, b, 3);
|
||||||
|
R0(b, c, d, e, a, 4);
|
||||||
|
R0(a, b, c, d, e, 5);
|
||||||
|
R0(e, a, b, c, d, 6);
|
||||||
|
R0(d, e, a, b, c, 7);
|
||||||
|
R0(c, d, e, a, b, 8);
|
||||||
|
R0(b, c, d, e, a, 9);
|
||||||
|
R0(a, b, c, d, e, 10);
|
||||||
|
R0(e, a, b, c, d, 11);
|
||||||
|
R0(d, e, a, b, c, 12);
|
||||||
|
R0(c, d, e, a, b, 13);
|
||||||
|
R0(b, c, d, e, a, 14);
|
||||||
|
R0(a, b, c, d, e, 15);
|
||||||
|
R1(e, a, b, c, d, 16);
|
||||||
|
R1(d, e, a, b, c, 17);
|
||||||
|
R1(c, d, e, a, b, 18);
|
||||||
|
R1(b, c, d, e, a, 19);
|
||||||
|
R2(a, b, c, d, e, 20);
|
||||||
|
R2(e, a, b, c, d, 21);
|
||||||
|
R2(d, e, a, b, c, 22);
|
||||||
|
R2(c, d, e, a, b, 23);
|
||||||
|
R2(b, c, d, e, a, 24);
|
||||||
|
R2(a, b, c, d, e, 25);
|
||||||
|
R2(e, a, b, c, d, 26);
|
||||||
|
R2(d, e, a, b, c, 27);
|
||||||
|
R2(c, d, e, a, b, 28);
|
||||||
|
R2(b, c, d, e, a, 29);
|
||||||
|
R2(a, b, c, d, e, 30);
|
||||||
|
R2(e, a, b, c, d, 31);
|
||||||
|
R2(d, e, a, b, c, 32);
|
||||||
|
R2(c, d, e, a, b, 33);
|
||||||
|
R2(b, c, d, e, a, 34);
|
||||||
|
R2(a, b, c, d, e, 35);
|
||||||
|
R2(e, a, b, c, d, 36);
|
||||||
|
R2(d, e, a, b, c, 37);
|
||||||
|
R2(c, d, e, a, b, 38);
|
||||||
|
R2(b, c, d, e, a, 39);
|
||||||
|
R3(a, b, c, d, e, 40);
|
||||||
|
R3(e, a, b, c, d, 41);
|
||||||
|
R3(d, e, a, b, c, 42);
|
||||||
|
R3(c, d, e, a, b, 43);
|
||||||
|
R3(b, c, d, e, a, 44);
|
||||||
|
R3(a, b, c, d, e, 45);
|
||||||
|
R3(e, a, b, c, d, 46);
|
||||||
|
R3(d, e, a, b, c, 47);
|
||||||
|
R3(c, d, e, a, b, 48);
|
||||||
|
R3(b, c, d, e, a, 49);
|
||||||
|
R3(a, b, c, d, e, 50);
|
||||||
|
R3(e, a, b, c, d, 51);
|
||||||
|
R3(d, e, a, b, c, 52);
|
||||||
|
R3(c, d, e, a, b, 53);
|
||||||
|
R3(b, c, d, e, a, 54);
|
||||||
|
R3(a, b, c, d, e, 55);
|
||||||
|
R3(e, a, b, c, d, 56);
|
||||||
|
R3(d, e, a, b, c, 57);
|
||||||
|
R3(c, d, e, a, b, 58);
|
||||||
|
R3(b, c, d, e, a, 59);
|
||||||
|
R4(a, b, c, d, e, 60);
|
||||||
|
R4(e, a, b, c, d, 61);
|
||||||
|
R4(d, e, a, b, c, 62);
|
||||||
|
R4(c, d, e, a, b, 63);
|
||||||
|
R4(b, c, d, e, a, 64);
|
||||||
|
R4(a, b, c, d, e, 65);
|
||||||
|
R4(e, a, b, c, d, 66);
|
||||||
|
R4(d, e, a, b, c, 67);
|
||||||
|
R4(c, d, e, a, b, 68);
|
||||||
|
R4(b, c, d, e, a, 69);
|
||||||
|
R4(a, b, c, d, e, 70);
|
||||||
|
R4(e, a, b, c, d, 71);
|
||||||
|
R4(d, e, a, b, c, 72);
|
||||||
|
R4(c, d, e, a, b, 73);
|
||||||
|
    R4(b, c, d, e, a, 74);
    R4(a, b, c, d, e, 75);
    R4(e, a, b, c, d, 76);
    R4(d, e, a, b, c, 77);
    R4(c, d, e, a, b, 78);
    R4(b, c, d, e, a, 79);
    /* Add the working vars back into context.state[] */
    state[0] += a;
    state[1] += b;
    state[2] += c;
    state[3] += d;
    state[4] += e;
    /* Wipe variables */
    a = b = c = d = e = 0;
#ifdef SHA1HANDSOFF
    memset(block, '\0', sizeof(block));
#endif
}


/* SHA1Init - Initialize new context */

void SHA1Init(
    SHA1_CTX * context
)
{
    /* SHA1 initialization constants */
    context->state[0] = 0x67452301;
    context->state[1] = 0xEFCDAB89;
    context->state[2] = 0x98BADCFE;
    context->state[3] = 0x10325476;
    context->state[4] = 0xC3D2E1F0;
    context->count[0] = context->count[1] = 0;
}


/* Run your data through this. */

void SHA1Update(
    SHA1_CTX * context,
    const unsigned char *data,
    uint32_t len
)
{
    uint32_t i;

    uint32_t j;

    j = context->count[0];
    if ((context->count[0] += len << 3) < j)
        context->count[1]++;
    context->count[1] += (len >> 29);
    j = (j >> 3) & 63;
    if ((j + len) > 63)
    {
        memcpy(&context->buffer[j], data, (i = 64 - j));
        SHA1Transform(context->state, context->buffer);
        for (; i + 63 < len; i += 64)
        {
            SHA1Transform(context->state, &data[i]);
        }
        j = 0;
    }
    else
        i = 0;
    memcpy(&context->buffer[j], &data[i], len - i);
}


/* Add padding and return the message digest. */

void SHA1Final(
    unsigned char digest[20],
    SHA1_CTX * context
)
{
    unsigned i;

    unsigned char finalcount[8];

    unsigned char c;

#if 0 /* untested "improvement" by DHR */
    /* Convert context->count to a sequence of bytes
     * in finalcount.  Second element first, but
     * big-endian order within element.
     * But we do it all backwards.
     */
    unsigned char *fcp = &finalcount[8];

    for (i = 0; i < 2; i++)
    {
        uint32_t t = context->count[i];

        int j;

        for (j = 0; j < 4; t >>= 8, j++)
            *--fcp = (unsigned char) t}
#else
    for (i = 0; i < 8; i++)
    {
        finalcount[i] = (unsigned char) ((context->count[(i >= 4 ? 0 : 1)] >> ((3 - (i & 3)) * 8)) & 255);      /* Endian independent */
    }
#endif
    c = 0200;
    SHA1Update(context, &c, 1);
    while ((context->count[0] & 504) != 448)
    {
        c = 0000;
        SHA1Update(context, &c, 1);
    }
    SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
    for (i = 0; i < 20; i++)
    {
        digest[i] = (unsigned char)
            ((context->state[i >> 2] >> ((3 - (i & 3)) * 8)) & 255);
    }
    /* Wipe variables */
    memset(context, '\0', sizeof(*context));
    memset(&finalcount, '\0', sizeof(finalcount));
}

void SHA1(
    char *hash_out,
    const char *str,
    uint32_t len)
{
    SHA1_CTX ctx;
    unsigned int ii;

    SHA1Init(&ctx);
    for (ii=0; ii<len; ii+=1)
        SHA1Update(&ctx, (const unsigned char*)str + ii, 1);
    SHA1Final((unsigned char *)hash_out, &ctx);
}
52 examples/gguf-hash/deps/sha1/sha1.h Normal file
@ -0,0 +1,52 @@
#ifndef SHA1_H
#define SHA1_H

/*
SHA-1 in C
By Steve Reid <steve@edmweb.com>
100% Public Domain
*/

#include "stdint.h"

#if defined(__cplusplus)
extern "C" {
#endif

typedef struct
{
    uint32_t state[5];
    uint32_t count[2];
    unsigned char buffer[64];
} SHA1_CTX;

void SHA1Transform(
    uint32_t state[5],
    const unsigned char buffer[64]
);

void SHA1Init(
    SHA1_CTX * context
);

void SHA1Update(
    SHA1_CTX * context,
    const unsigned char *data,
    uint32_t len
);

void SHA1Final(
    unsigned char digest[20],
    SHA1_CTX * context
);

void SHA1(
    char *hash_out,
    const char *str,
    uint32_t len);

#if defined(__cplusplus)
}
#endif

#endif /* SHA1_H */
15 examples/gguf-hash/deps/sha256/package.json Normal file
@ -0,0 +1,15 @@
{
  "name": "sha256",
  "version": "0.0.2",
  "repo": "jb55/sha256.c",
  "description": "sha256 in c",
  "keywords": ["sha256", "sha2"],
  "src": ["sha256.c", "sha256.h"],
  "dependencies": {
    "jb55/rotate-bits.h": "0.1.1"
  },
  "development": {
    "thlorenz/tap.c": "*"
  }
}
221 examples/gguf-hash/deps/sha256/sha256.c Normal file
@ -0,0 +1,221 @@
/* Crypto/Sha256.c -- SHA-256 Hash
2010-06-11 : Igor Pavlov : Public domain
This code is based on public domain code from Wei Dai's Crypto++ library. */

#include "rotate-bits/rotate-bits.h"
#include "sha256.h"

/* define it for speed optimization */
#define _SHA256_UNROLL
#define _SHA256_UNROLL2

void
sha256_init(sha256_t *p)
{
  p->state[0] = 0x6a09e667;
  p->state[1] = 0xbb67ae85;
  p->state[2] = 0x3c6ef372;
  p->state[3] = 0xa54ff53a;
  p->state[4] = 0x510e527f;
  p->state[5] = 0x9b05688c;
  p->state[6] = 0x1f83d9ab;
  p->state[7] = 0x5be0cd19;
  p->count = 0;
}

#define S0(x) (ROTR32(x, 2) ^ ROTR32(x,13) ^ ROTR32(x, 22))
#define S1(x) (ROTR32(x, 6) ^ ROTR32(x,11) ^ ROTR32(x, 25))
#define s0(x) (ROTR32(x, 7) ^ ROTR32(x,18) ^ (x >> 3))
#define s1(x) (ROTR32(x,17) ^ ROTR32(x,19) ^ (x >> 10))

#define blk0(i) (W[i] = data[i])
#define blk2(i) (W[i&15] += s1(W[(i-2)&15]) + W[(i-7)&15] + s0(W[(i-15)&15]))

#define Ch(x,y,z) (z^(x&(y^z)))
#define Maj(x,y,z) ((x&y)|(z&(x|y)))

#define a(i) T[(0-(i))&7]
#define b(i) T[(1-(i))&7]
#define c(i) T[(2-(i))&7]
#define d(i) T[(3-(i))&7]
#define e(i) T[(4-(i))&7]
#define f(i) T[(5-(i))&7]
#define g(i) T[(6-(i))&7]
#define h(i) T[(7-(i))&7]


#ifdef _SHA256_UNROLL2

#define R(a,b,c,d,e,f,g,h, i) h += S1(e) + Ch(e,f,g) + K[i+j] + (j?blk2(i):blk0(i));\
  d += h; h += S0(a) + Maj(a, b, c)

#define RX_8(i) \
  R(a,b,c,d,e,f,g,h, i); \
  R(h,a,b,c,d,e,f,g, (i+1)); \
  R(g,h,a,b,c,d,e,f, (i+2)); \
  R(f,g,h,a,b,c,d,e, (i+3)); \
  R(e,f,g,h,a,b,c,d, (i+4)); \
  R(d,e,f,g,h,a,b,c, (i+5)); \
  R(c,d,e,f,g,h,a,b, (i+6)); \
  R(b,c,d,e,f,g,h,a, (i+7))

#else

#define R(i) h(i) += S1(e(i)) + Ch(e(i),f(i),g(i)) + K[i+j] + (j?blk2(i):blk0(i));\
  d(i) += h(i); h(i) += S0(a(i)) + Maj(a(i), b(i), c(i))

#ifdef _SHA256_UNROLL

#define RX_8(i) R(i+0); R(i+1); R(i+2); R(i+3); R(i+4); R(i+5); R(i+6); R(i+7);

#endif

#endif

static const uint32_t K[64] = {
  0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
  0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
  0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
  0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
  0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
  0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
  0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
  0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
  0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
  0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
  0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
  0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
  0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
  0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
  0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
  0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

static void
sha256_transform(uint32_t *state, const uint32_t *data)
{
  uint32_t W[16] = {0};
  unsigned j;
#ifdef _SHA256_UNROLL2
  uint32_t a,b,c,d,e,f,g,h;
  a = state[0];
  b = state[1];
  c = state[2];
  d = state[3];
  e = state[4];
  f = state[5];
  g = state[6];
  h = state[7];
#else
  uint32_t T[8];
  for (j = 0; j < 8; j++)
    T[j] = state[j];
#endif

  for (j = 0; j < 64; j += 16)
  {
#if defined(_SHA256_UNROLL) || defined(_SHA256_UNROLL2)
    RX_8(0); RX_8(8);
#else
    unsigned i;
    for (i = 0; i < 16; i++) { R(i); }
#endif
  }

#ifdef _SHA256_UNROLL2
  state[0] += a;
  state[1] += b;
  state[2] += c;
  state[3] += d;
  state[4] += e;
  state[5] += f;
  state[6] += g;
  state[7] += h;
#else
  for (j = 0; j < 8; j++)
    state[j] += T[j];
#endif

  /* Wipe variables */
  /* memset(W, 0, sizeof(W)); */
  /* memset(T, 0, sizeof(T)); */
}

#undef S0
#undef S1
#undef s0
#undef s1

static void
sha256_write_byte_block(sha256_t *p)
{
  uint32_t data32[16];
  unsigned i;
  for (i = 0; i < 16; i++)
    data32[i] =
      ((uint32_t)(p->buffer[i * 4    ]) << 24) +
      ((uint32_t)(p->buffer[i * 4 + 1]) << 16) +
      ((uint32_t)(p->buffer[i * 4 + 2]) << 8) +
      ((uint32_t)(p->buffer[i * 4 + 3]));
  sha256_transform(p->state, data32);
}


void
sha256_hash(unsigned char *buf, const unsigned char *data, size_t size)
{
  sha256_t hash;
  sha256_init(&hash);
  sha256_update(&hash, data, size);
  sha256_final(&hash, buf);
}


void
sha256_update(sha256_t *p, const unsigned char *data, size_t size)
{
  uint32_t curBufferPos = (uint32_t)p->count & 0x3F;
  while (size > 0)
  {
    p->buffer[curBufferPos++] = *data++;
    p->count++;
    size--;
    if (curBufferPos == 64)
    {
      curBufferPos = 0;
      sha256_write_byte_block(p);
    }
  }
}


void
sha256_final(sha256_t *p, unsigned char *digest)
{
  uint64_t lenInBits = (p->count << 3);
  uint32_t curBufferPos = (uint32_t)p->count & 0x3F;
  unsigned i;
  p->buffer[curBufferPos++] = 0x80;
  while (curBufferPos != (64 - 8))
  {
    curBufferPos &= 0x3F;
    if (curBufferPos == 0)
      sha256_write_byte_block(p);
    p->buffer[curBufferPos++] = 0;
  }
  for (i = 0; i < 8; i++)
  {
    p->buffer[curBufferPos++] = (unsigned char)(lenInBits >> 56);
    lenInBits <<= 8;
  }
  sha256_write_byte_block(p);

  for (i = 0; i < 8; i++)
  {
    *digest++ = (unsigned char)(p->state[i] >> 24);
    *digest++ = (unsigned char)(p->state[i] >> 16);
    *digest++ = (unsigned char)(p->state[i] >> 8);
    *digest++ = (unsigned char)(p->state[i]);
  }
  sha256_init(p);
}
24 examples/gguf-hash/deps/sha256/sha256.h Normal file
@ -0,0 +1,24 @@
/* Sha256.h -- SHA-256 Hash
2010-06-11 : Igor Pavlov : Public domain */

#ifndef __CRYPTO_SHA256_H
#define __CRYPTO_SHA256_H

#include <stdlib.h>
#include <stdint.h>

#define SHA256_DIGEST_SIZE 32

typedef struct sha256_t
{
  uint32_t state[8];
  uint64_t count;
  unsigned char buffer[64];
} sha256_t;

void sha256_init(sha256_t *p);
void sha256_update(sha256_t *p, const unsigned char *data, size_t size);
void sha256_final(sha256_t *p, unsigned char *digest);
void sha256_hash(unsigned char *buf, const unsigned char *data, size_t size);

#endif
12 examples/gguf-hash/deps/xxhash/clib.json Normal file
@ -0,0 +1,12 @@
{
  "name": "xxhash",
  "version": "0.8.2",
  "repo": "mofosyne/xxhash",
  "description": "Extremely fast non-cryptographic hash algorithm",
  "keywords": ["xxhash", "hashing"],
  "license": "BSD-2-Clause",
  "src": [
    "xxhash.c",
    "xxhash.h"
  ]
}
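The `package.json` and `clib.json` files above are manifests in the clib package format. As a hedged aside (an assumption, not something this commit states or the build depends on), the vendored copies under `examples/gguf-hash/deps/` could in principle be refreshed with the clib package manager, using the repo names given in the manifests:

```sh
# assumption: vendoring via clib (github.com/clibs/clib); repo names taken from the manifests above
clib install jb55/sha256.c -o examples/gguf-hash/deps
clib install mofosyne/xxhash -o examples/gguf-hash/deps
```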
42 examples/gguf-hash/deps/xxhash/xxhash.c Normal file
@ -0,0 +1,42 @@
/*
 * xxHash - Extremely Fast Hash algorithm
 * Copyright (C) 2012-2023 Yann Collet
 *
 * BSD 2-Clause License (https://www.opensource.org/licenses/bsd-license.php)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * You can contact the author at:
 *   - xxHash homepage: https://www.xxhash.com
 *   - xxHash source repository: https://github.com/Cyan4973/xxHash
 */

/*
 * xxhash.c instantiates functions defined in xxhash.h
 */

#define XXH_STATIC_LINKING_ONLY /* access advanced declarations */
#define XXH_IMPLEMENTATION      /* access definitions */

#include "xxhash.h"
7093 examples/gguf-hash/deps/xxhash/xxhash.h Normal file
File diff suppressed because it is too large

693 examples/gguf-hash/gguf-hash.cpp Normal file
@ -0,0 +1,693 @@
#include "ggml.h"
|
||||||
|
|
||||||
|
#include <cstdlib> /* abort() */
|
||||||
|
#include <cstddef>
|
||||||
|
#include <cstdio>
|
||||||
|
#include <string>
|
||||||
|
#include <stdexcept>
|
||||||
|
#include <algorithm>
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
#include <sstream>
|
||||||
|
#include <fstream>
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "xxhash/xxhash.h"
|
||||||
|
#include "sha1/sha1.h"
|
||||||
|
#include "sha256/sha256.h"
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
// uuid.uuid5(uuid.NAMESPACE_URL, 'en.wikipedia.org/wiki/Llama.cpp')
|
||||||
|
#define UUID_NAMESPACE_LLAMA_CPP "ef001206-dadc-5f6d-a15f-3359e577d4e5"
|
||||||
|
#define UUID_NAMESPACE_LLAMA_CPP_HEX 0xef, 0x00, 0x12, 0x06, 0xda, 0xdc, 0x5f, 0x6d, 0xa1, 0x5f, 0x33, 0x59, 0xe5, 0x77, 0xd4, 0xe5
|
||||||
|
|
||||||
|
|
||||||
|
#define HASH_TYPE_SHA256_STR "sha256"
|
||||||
|
#define HASH_TYPE_SHA1_STR "sha1"
|
||||||
|
#define HASH_TYPE_XXH64_STR "xxh64"
|
||||||
|
#define HASH_TYPE_UUID_STR "uuid"
|
||||||
|
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
HASH_EXIT_SUCCESS = 0, // All hash has been generated or validated
|
||||||
|
HASH_EXIT_FAILURE = 1, // Generic Failure
|
||||||
|
HASH_EXIT_MISMATCH = 2, // Hash mismatched during validation
|
||||||
|
HASH_EXIT_MANIFEST_MISSING_ENTRY = 3, // Hash attempted validation but missing entry in manifest
|
||||||
|
HASH_EXIT_MANIFEST_UNKNOWN_HASH = 4, // Manifest is present, but we do not know any hash format within it
|
||||||
|
HASH_EXIT_MANIFEST_FILE_ERROR = 5 // Manifest is either missing or not a known format
|
||||||
|
} hash_exit_code_t;
|
||||||
|
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
HASH_MANIFEST_NOT_FOUND,
|
||||||
|
HASH_MANIFEST_MISMATCH,
|
||||||
|
HASH_MANIFEST_OK,
|
||||||
|
} hash_manifest_result_t;
|
||||||
|
|
||||||
|
|
||||||
|
struct hash_params {
|
||||||
|
std::string input;
|
||||||
|
bool xxh64 = false;
|
||||||
|
bool sha1 = false;
|
||||||
|
bool sha256 = false;
|
||||||
|
bool uuid = false;
|
||||||
|
|
||||||
|
bool no_layer = false;
|
||||||
|
|
||||||
|
bool manifest_is_usable = false;
|
||||||
|
std::string manifest_file;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct manifest_check_params {
|
||||||
|
bool xxh64 = false;
|
||||||
|
bool sha1 = false;
|
||||||
|
bool sha256 = false;
|
||||||
|
bool uuid = false;
|
||||||
|
};
|
||||||
|
|
||||||
|
static char const * hash_manifest_result_to_str(hash_manifest_result_t value) {
|
||||||
|
switch (value) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND: return "Not Found";
|
||||||
|
case HASH_MANIFEST_MISMATCH: return "Mismatch";
|
||||||
|
case HASH_MANIFEST_OK: return "Ok";
|
||||||
|
}
|
||||||
|
return "?";
|
||||||
|
}
|
||||||
|
|
||||||
|
static char const * hash_exit_code_to_str(hash_exit_code_t value) {
|
||||||
|
switch (value) {
|
||||||
|
case HASH_EXIT_SUCCESS: return "Success";
|
||||||
|
case HASH_EXIT_FAILURE: return "Failure";
|
||||||
|
case HASH_EXIT_MISMATCH: return "Mismatch";
|
||||||
|
case HASH_EXIT_MANIFEST_MISSING_ENTRY: return "Manifest Missing Entry";
|
||||||
|
case HASH_EXIT_MANIFEST_UNKNOWN_HASH: return "Manifest Unknown Hash";
|
||||||
|
case HASH_EXIT_MANIFEST_FILE_ERROR: return "Manifest File Error";
|
||||||
|
}
|
||||||
|
return "?";
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hash_print_usage(const char * executable) {
|
||||||
|
const hash_params default_params;
|
||||||
|
printf("\n");
|
||||||
|
printf("usage: %s [options] GGUF_IN\n", executable);
|
||||||
|
printf("\n");
|
||||||
|
printf("Hash a GGUF file");
|
||||||
|
printf("\n");
|
||||||
|
printf("options:\n");
|
||||||
|
printf(" -h, --help show this help message and exit\n");
|
||||||
|
printf(" --xxh64 use xxh64 hash\n");
|
||||||
|
printf(" --sha1 use sha1 hash\n");
|
||||||
|
printf(" --sha256 use sha256 hash\n");
|
||||||
|
printf(" --all use all hash\n");
|
||||||
|
printf(" --no-layer exclude per layer hash\n");
|
||||||
|
printf(" --uuid generate UUIDv5 ID\n");
|
||||||
|
printf(" -c, --check <manifest> verify against a manifest\n");
|
||||||
|
printf("\n");
|
||||||
|
}
|
||||||
|
|
||||||
|
static void hash_params_parse_ex(int argc, const char ** argv, hash_params & params) {
|
||||||
|
std::string arg;
|
||||||
|
bool invalid_param = false;
|
||||||
|
const std::string arg_prefix = "--";
|
||||||
|
|
||||||
|
int arg_idx = 1;
|
||||||
|
for (; arg_idx < argc && strncmp(argv[arg_idx], "--", 2) == 0; arg_idx++) {
|
||||||
|
arg = argv[arg_idx];
|
||||||
|
if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
|
||||||
|
std::replace(arg.begin(), arg.end(), '_', '-');
|
||||||
|
}
|
||||||
|
|
||||||
|
bool arg_found = false;
|
||||||
|
if (arg == "-h" || arg == "--help") {
|
||||||
|
hash_print_usage(argv[0]);
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--xxh64") {
|
||||||
|
arg_found = true;
|
||||||
|
params.xxh64 = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--sha1") {
|
||||||
|
arg_found = true;
|
||||||
|
params.sha1 = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--uuid") {
|
||||||
|
arg_found = true;
|
||||||
|
params.uuid = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--sha256") {
|
||||||
|
arg_found = true;
|
||||||
|
params.sha256 = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--all") {
|
||||||
|
arg_found = true;
|
||||||
|
params.sha256 = true;
|
||||||
|
params.sha1 = true;
|
||||||
|
params.xxh64 = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "--no-layer") {
|
||||||
|
arg_found = true;
|
||||||
|
params.no_layer = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (arg == "-c" || arg == "--check") {
|
||||||
|
if (++arg_idx >= argc) {
|
||||||
|
invalid_param = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
arg_found = true;
|
||||||
|
params.manifest_file = argv[arg_idx];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!arg_found) {
|
||||||
|
throw std::invalid_argument("error: unknown argument: " + arg);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (invalid_param) {
|
||||||
|
throw std::invalid_argument("error: invalid parameter for argument:" + arg);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (argc - arg_idx < 1) {
|
||||||
|
throw std::invalid_argument("error: bad arguments");
|
||||||
|
}
|
||||||
|
|
||||||
|
params.input = argv[arg_idx++];
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool hash_params_parse(int argc, const char ** argv, hash_params & params) {
|
||||||
|
bool result = true;
|
||||||
|
try {
|
||||||
|
hash_params_parse_ex(argc, argv, params);
|
||||||
|
}
|
||||||
|
catch (const std::invalid_argument & ex) {
|
||||||
|
fprintf(stderr, "%s\n", ex.what());
|
||||||
|
hash_print_usage(argv[0]);
|
||||||
|
exit(EXIT_FAILURE);
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool manifest_type(const std::string & manifest_file, manifest_check_params & manifest_check) {
|
||||||
|
if (manifest_file.empty()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::ifstream file(manifest_file);
|
||||||
|
if (!file.is_open()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string manifest_entry_line;
|
||||||
|
while (getline(file, manifest_entry_line)) {
|
||||||
|
// hash_type_str hash_str tensor_name
|
||||||
|
// e.g. 'xxh64 f66e9cd66a4396a0 test.gguf:tensor_0'
|
||||||
|
std::istringstream line_stream(manifest_entry_line);
|
||||||
|
std::string file_hash_type;
|
||||||
|
if (line_stream >> file_hash_type) {
|
||||||
|
if (file_hash_type == HASH_TYPE_SHA256_STR) {
|
||||||
|
manifest_check.sha256 = true;
|
||||||
|
} else if (file_hash_type == HASH_TYPE_SHA1_STR) {
|
||||||
|
manifest_check.sha1 = true;
|
||||||
|
} else if (file_hash_type == HASH_TYPE_XXH64_STR) {
|
||||||
|
manifest_check.xxh64 = true;
|
||||||
|
} else if (file_hash_type == HASH_TYPE_UUID_STR) {
|
||||||
|
manifest_check.uuid = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static hash_manifest_result_t manifest_verify(const std::string& manifest_file, const std::string& hash_type_str, const std::string& hash_str, const std::string& tensor_name) {
|
||||||
|
if (manifest_file.empty()) {
|
||||||
|
return HASH_MANIFEST_NOT_FOUND;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::ifstream file(manifest_file);
|
||||||
|
if (!file.is_open()) {
|
||||||
|
return HASH_MANIFEST_NOT_FOUND;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string manifest_entry_line;
|
||||||
|
while (getline(file, manifest_entry_line)) {
|
||||||
|
std::istringstream line_stream(manifest_entry_line);
|
||||||
|
std::string file_hash_type;
|
||||||
|
std::string file_hash;
|
||||||
|
std::string file_tensor_name;
|
||||||
|
if (line_stream >> file_hash_type >> file_hash >> file_tensor_name) {
|
||||||
|
// Line parsed. Check hash validity
|
||||||
|
|
||||||
|
if (file_hash_type != hash_type_str) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (file_tensor_name != tensor_name) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
return (file_hash == hash_str) ? HASH_MANIFEST_OK : HASH_MANIFEST_MISMATCH;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return HASH_MANIFEST_NOT_FOUND;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void generate_uuidv5(const unsigned char sha1_digest[20], unsigned char uuid[16]) {
|
||||||
|
// Ref: https://www.rfc-editor.org/rfc/rfc9562.html#section-5.5
|
||||||
|
// Assumes that digest was processed correctly with the expected namespace
|
||||||
|
for (int i = 0; i < 16; i++) {
|
||||||
|
uuid[i] = sha1_digest[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set bits corresponding to UUID ver 5
|
||||||
|
uuid[ 6] &= ~(0xF << 4);
|
||||||
|
uuid[ 6] |= (5 << 4);
|
||||||
|
|
||||||
|
// Set bits corresponding to UUID variant 0b10XX
|
||||||
|
uuid[ 8] &= ~(0xc << 4);
|
||||||
|
uuid[ 8] |= (0x8 << 4);
|
||||||
|
}
|
||||||
|
|
||||||
|
static hash_exit_code_t gguf_hash(const hash_params & hash_params) {
|
||||||
|
const std::string & fname = hash_params.input;
|
||||||
|
struct ggml_context * ctx_data = NULL;
|
||||||
|
|
||||||
|
struct gguf_init_params params = {
|
||||||
|
/*.no_alloc = */ false,
|
||||||
|
/*.ctx = */ &ctx_data,
|
||||||
|
};
|
||||||
|
|
||||||
|
// xxh64 init
|
||||||
|
XXH64_state_t* xxh64_model_hash_state = NULL;
|
||||||
|
if (hash_params.xxh64) {
|
||||||
|
xxh64_model_hash_state = XXH64_createState();
|
||||||
|
if (xxh64_model_hash_state==NULL) {
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
|
||||||
|
XXH64_hash_t const seed = 0;
|
||||||
|
if (XXH64_reset(xxh64_model_hash_state, seed) == XXH_ERROR) {
|
||||||
|
abort();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sha1 init
|
||||||
|
SHA1_CTX sha1_model_hash_ctx;
|
||||||
|
if (hash_params.sha1) {
|
||||||
|
SHA1Init(&sha1_model_hash_ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
// sha256 init
|
||||||
|
sha256_t sha256_model_hash_ctx;
|
||||||
|
if (hash_params.sha256) {
|
||||||
|
sha256_init(&sha256_model_hash_ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
// sha1 for uuid init
|
||||||
|
SHA1_CTX sha1_for_uuid_ctx;
|
||||||
|
if (hash_params.uuid) {
|
||||||
|
unsigned char const uuidv5_namespace[] = {UUID_NAMESPACE_LLAMA_CPP_HEX};
|
||||||
|
SHA1Init(&sha1_for_uuid_ctx);
|
||||||
|
SHA1Update( &sha1_for_uuid_ctx, (unsigned char const *)uuidv5_namespace, sizeof(uuidv5_namespace));
|
||||||
|
}
|
||||||
|
|
||||||
|
struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
|
||||||
|
const int n_tensors = gguf_get_n_tensors(ctx);
|
||||||
|
bool tensor_layer_in_manifest = false;
|
||||||
|
bool model_in_manifest = false;
|
||||||
|
bool tensor_layer_has_mismatch = false;
|
||||||
|
bool model_has_mismatch = false;
|
||||||
|
for (int i = 0; i < n_tensors; ++i) {
|
||||||
|
const char * name = gguf_get_tensor_name(ctx, i);
|
||||||
|
struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
|
||||||
|
auto n_bytes = ggml_nbytes(cur);
|
||||||
|
auto *raw_data = cur->data;
|
||||||
|
const std::string tensor_layer_name = fname + ":" + name;
|
||||||
|
|
||||||
|
if (hash_params.xxh64) {
|
||||||
|
|
||||||
|
if (!hash_params.no_layer) {
|
||||||
|
// Per Layer Hash
|
||||||
|
XXH64_hash_t hash = XXH64(raw_data, n_bytes, 0);
|
||||||
|
|
||||||
|
char hex_result[17];
|
||||||
|
for (int offset = 0; offset < 8; offset++) {
|
||||||
|
unsigned int shift_bits_by = (8 * (8 - offset - 1));
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", (unsigned char) (hash >> shift_bits_by)&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_XXH64_STR, hex_result, tensor_layer_name);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
tensor_layer_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_XXH64_STR, hex_result, tensor_layer_name.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_XXH64_STR, hex_result, tensor_layer_name.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overall Model Hash
|
||||||
|
if (XXH64_update(xxh64_model_hash_state, raw_data, n_bytes) == XXH_ERROR) abort();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.sha1) {
|
||||||
|
|
||||||
|
if (!hash_params.no_layer) {
|
||||||
|
// Per Layer Hash
|
||||||
|
char result[21]; // sha1 outputs 20 bytes
|
||||||
|
SHA1( result, (const char *)raw_data, n_bytes);
|
||||||
|
|
||||||
|
char hex_result[41] = {0};
|
||||||
|
for (int offset = 0; offset < 20; offset++) {
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", result[offset]&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_SHA1_STR, hex_result, tensor_layer_name);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
tensor_layer_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_SHA1_STR, hex_result, tensor_layer_name.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_SHA1_STR, hex_result, tensor_layer_name.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overall Model Hash
|
||||||
|
SHA1Update( &sha1_model_hash_ctx, (unsigned char const *)raw_data, n_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.sha256) {
|
||||||
|
|
||||||
|
if (!hash_params.no_layer) {
|
||||||
|
// Per Layer Hash
|
||||||
|
unsigned char result[SHA256_DIGEST_SIZE]; // sha256 outputs 32 bytes
|
||||||
|
sha256_hash((unsigned char*) result, (const unsigned char *)raw_data, n_bytes);
|
||||||
|
|
||||||
|
char hex_result[SHA256_DIGEST_SIZE * 2 + 1] = {0};
|
||||||
|
for (int offset = 0; offset < SHA256_DIGEST_SIZE; offset++) {
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", result[offset]&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_SHA256_STR, hex_result, tensor_layer_name);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
tensor_layer_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
tensor_layer_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_SHA256_STR, hex_result, tensor_layer_name.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_SHA256_STR, hex_result, tensor_layer_name.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overall Model Hash
|
||||||
|
sha256_update( &sha256_model_hash_ctx, (unsigned char const *)raw_data, n_bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.uuid) {
|
||||||
|
SHA1Update( &sha1_for_uuid_ctx, (unsigned char const *)raw_data, n_bytes);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.xxh64) {
|
||||||
|
XXH64_hash_t const hash = XXH64_digest(xxh64_model_hash_state);
|
||||||
|
|
||||||
|
char hex_result[17];
|
||||||
|
for (int offset = 0; offset < 8; offset++) {
|
||||||
|
unsigned int shift_bits_by = (8 * (8 - offset - 1));
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", (unsigned char) (hash >> shift_bits_by)&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_XXH64_STR, hex_result, fname);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
model_in_manifest = true;
|
||||||
|
model_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
model_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_XXH64_STR, hex_result, fname.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_XXH64_STR, hex_result, fname.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.sha1) {
|
||||||
|
unsigned char result[21];
|
||||||
|
SHA1Final(result, &sha1_model_hash_ctx);
|
||||||
|
|
||||||
|
char hex_result[41];
|
||||||
|
for (int offset = 0; offset < 20; offset++) {
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", result[offset]&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_SHA1_STR, hex_result, fname);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
model_in_manifest = true;
|
||||||
|
model_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
model_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_SHA1_STR, hex_result, fname.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_SHA1_STR, hex_result, fname.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.sha256) {
|
||||||
|
unsigned char result[SHA256_DIGEST_SIZE]; // sha256 outputs 32 bytes
|
||||||
|
sha256_final( &sha256_model_hash_ctx, result);
|
||||||
|
|
||||||
|
char hex_result[SHA256_DIGEST_SIZE * 2 + 1] = {0};
|
||||||
|
for (int offset = 0; offset < SHA256_DIGEST_SIZE; offset++) {
|
||||||
|
sprintf( ( hex_result + (2*offset)), "%02x", result[offset]&0xff);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_SHA256_STR, hex_result, fname);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
model_in_manifest = true;
|
||||||
|
model_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
model_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_SHA256_STR, hex_result, fname.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_SHA256_STR, hex_result, fname.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hash_params.uuid) {
|
||||||
|
unsigned char result[21];
|
||||||
|
SHA1Final(result, &sha1_for_uuid_ctx);
|
||||||
|
|
||||||
|
unsigned char uuid[16];
|
||||||
|
generate_uuidv5(result, uuid);
|
||||||
|
|
||||||
|
char string_buffer[37] = {0};
|
||||||
|
sprintf(string_buffer, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x",
|
||||||
|
uuid[0], uuid[1], uuid[2], uuid[3],
|
||||||
|
uuid[4], uuid[5], uuid[6], uuid[7],
|
||||||
|
uuid[8], uuid[9], uuid[10], uuid[11],
|
||||||
|
uuid[12], uuid[13], uuid[14], uuid[15]);
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
hash_manifest_result_t verify_result = manifest_verify(hash_params.manifest_file, HASH_TYPE_SHA256_STR, string_buffer, fname);
|
||||||
|
|
||||||
|
switch (verify_result) {
|
||||||
|
case HASH_MANIFEST_NOT_FOUND:
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_MISMATCH:
|
||||||
|
model_in_manifest = true;
|
||||||
|
model_has_mismatch = true;
|
||||||
|
break;
|
||||||
|
case HASH_MANIFEST_OK:
|
||||||
|
model_in_manifest = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("%-8s %-s %s - %s\n", HASH_TYPE_UUID_STR, string_buffer, fname.c_str(), hash_manifest_result_to_str(verify_result));
|
||||||
|
} else {
|
||||||
|
printf("%-8s %-s %s\n", HASH_TYPE_UUID_STR, string_buffer, fname.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ggml_free(ctx_data);
|
||||||
|
gguf_free(ctx);
|
||||||
|
|
||||||
|
|
||||||
|
if (hash_params.manifest_is_usable) {
|
||||||
|
// In hash verification mode
|
||||||
|
|
||||||
|
if (!model_in_manifest) {
|
||||||
|
// model missing in manifest?
|
||||||
|
|
||||||
|
// Check tensor layer...
|
||||||
|
if (!tensor_layer_in_manifest) {
|
||||||
|
// Still missing? Maybe we are reading the wrong manifest.
|
||||||
|
return HASH_EXIT_MANIFEST_MISSING_ENTRY;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (tensor_layer_has_mismatch) {
|
||||||
|
// Per tensor check found error
|
||||||
|
return HASH_EXIT_FAILURE;
|
||||||
|
}
|
||||||
|
|
||||||
|
// All per tensor layer checks passed? Sounds good enough.
|
||||||
|
return HASH_EXIT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Overall model check passed, but let's check per layer just in case
|
||||||
|
// If missing, we don't care too much as the overall model checked
|
||||||
|
if (tensor_layer_in_manifest && tensor_layer_has_mismatch) {
|
||||||
|
return HASH_EXIT_FAILURE;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (model_has_mismatch) {
|
||||||
|
// model has failed hash somewhere in the model
|
||||||
|
return HASH_EXIT_FAILURE;
|
||||||
|
}
|
||||||
|
|
||||||
|
// All checks appears to be fine
|
||||||
|
return HASH_EXIT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
// In hash generation mode
|
||||||
|
return HASH_EXIT_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
int main(int argc, const char ** argv) {
|
||||||
|
hash_params params;
|
||||||
|
manifest_check_params manifest_check;
|
||||||
|
hash_params_parse(argc, argv, params);
|
||||||
|
|
||||||
|
if (!params.manifest_file.empty()) {
|
||||||
|
if (!manifest_type(params.manifest_file, manifest_check)) {
|
||||||
|
printf("ERROR cannot open manifest %s", params.manifest_file.c_str());
|
||||||
|
return HASH_EXIT_MANIFEST_FILE_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!manifest_check.sha256 && !manifest_check.sha1 && !manifest_check.xxh64 && !manifest_check.uuid) {
|
||||||
|
printf("ERROR manifest does not have any known hash format in %s", params.manifest_file.c_str());
|
||||||
|
return HASH_EXIT_MANIFEST_UNKNOWN_HASH;
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("manifest %s", params.manifest_file.c_str());
|
||||||
|
|
||||||
|
if (manifest_check.sha256) {
|
||||||
|
printf(" sha256");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (manifest_check.sha1) {
|
||||||
|
printf(" sha1");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (manifest_check.xxh64) {
|
||||||
|
printf(" xxh64");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (manifest_check.uuid) {
|
||||||
|
printf(" uuid");
|
||||||
|
}
|
||||||
|
|
||||||
|
printf("\n");
|
||||||
|
|
||||||
|
// Autoselect the highest security hash if manifest is provided but
|
||||||
|
// the user has not specifically defined the hash they care about
|
||||||
|
if (!params.xxh64 && !params.sha1 && !params.uuid && !params.sha256) {
|
||||||
|
// User has not selected a specific value, pick most secure hash
|
||||||
|
if (manifest_check.sha256) {
|
||||||
|
params.sha256 = true;
|
||||||
|
} else if (manifest_check.sha1) {
|
||||||
|
params.sha1 = true;
|
||||||
|
} else if (manifest_check.xxh64) {
|
||||||
|
params.xxh64 = true;
|
||||||
|
} else if (manifest_check.uuid) {
|
||||||
|
params.uuid = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
params.manifest_is_usable = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
// By default if no swich argument provided, assume xxh64
|
||||||
|
if (!params.xxh64 && !params.sha1 && !params.uuid && !params.sha256) {
|
||||||
|
params.xxh64 = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
hash_exit_code_t exit_code = gguf_hash(params);
|
||||||
|
|
||||||
|
if (params.manifest_is_usable) {
|
||||||
|
printf("\nVerification results for %s - %s\n", params.manifest_file.c_str(), hash_exit_code_to_str(exit_code));
|
||||||
|
}
|
||||||
|
|
||||||
|
return exit_code;
|
||||||
|
}
|
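Based on the usage text and the manifest-parsing code in `gguf-hash.cpp` above, the intended workflow is to record hashes into a manifest once and verify a model against it later. The sketch below is illustrative only: the binary name `llama-gguf-hash` is an assumption inferred from the `llama-*` naming used for the other tools in this commit, and the sample digest is the one quoted in the source comment.

```sh
# write per-tensor and whole-model xxh64 hashes to a manifest file
./llama-gguf-hash --xxh64 test.gguf > test.gguf.manifest

# each manifest line has the form "<hash_type> <hash> <file[:tensor]>", e.g.
#   xxh64    f66e9cd66a4396a0 test.gguf:tensor_0

# later, verify the file against that manifest (the hash type is auto-selected
# from the manifest when none is given), or emit a UUIDv5 model id
./llama-gguf-hash --check test.gguf.manifest test.gguf
./llama-gguf-hash --uuid test.gguf
```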
@ -659,4 +659,3 @@ int main(int argc, char ** argv) {

     return 0;
 }
-

@ -1,7 +1,7 @@
 # Usage:
 #! ./llama-server -m some-model.gguf &
 #! pip install pydantic
-#! python json-schema-pydantic-example.py
+#! python json_schema_pydantic_example.py

 from pydantic import BaseModel, Extra, TypeAdapter
 from annotated_types import MinLen

@ -322,7 +322,7 @@ actor LlamaContext {
         defer {
             result.deallocate()
         }
-        let nTokens = llama_token_to_piece(model, token, result, 8, false)
+        let nTokens = llama_token_to_piece(model, token, result, 8, 0, false)

         if nTokens < 0 {
             let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))

@ -330,7 +330,7 @@ actor LlamaContext {
             defer {
                 newResult.deallocate()
             }
-            let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens, false)
+            let nNewTokens = llama_token_to_piece(model, token, newResult, -nTokens, 0, false)
             let bufferPointer = UnsafeBufferPointer(start: newResult, count: Int(nNewTokens))
             return Array(bufferPointer)
         } else {
@ -30,16 +30,16 @@ git clone https://huggingface.co/mtgv/MobileVLM-1.7B
 git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 ```

-2. Use `llava-surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
+2. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:

 ```sh
-python ./examples/llava/llava-surgery.py -m path/to/MobileVLM-1.7B
+python ./examples/llava/llava_surgery.py -m path/to/MobileVLM-1.7B
 ```

-3. Use `convert-image-encoder-to-gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:
+3. Use `convert_image_encoder_to_gguf.py` with `--projector-type ldp` (for **V2** please use `--projector-type ldpv2`) to convert the LLaVA image encoder to GGUF:

 ```sh
-python ./examples/llava/convert-image-encoder-to-gguf \
+python ./examples/llava/convert_image_encoder_to_gguf \
     -m path/to/clip-vit-large-patch14-336 \
     --llava-projector path/to/MobileVLM-1.7B/llava.projector \
     --output-dir path/to/MobileVLM-1.7B \

@ -47,17 +47,17 @@ python ./examples/llava/convert-image-encoder-to-gguf \
 ```

 ```sh
-python ./examples/llava/convert-image-encoder-to-gguf \
+python ./examples/llava/convert_image_encoder_to_gguf \
     -m path/to/clip-vit-large-patch14-336 \
     --llava-projector path/to/MobileVLM-1.7B_V2/llava.projector \
     --output-dir path/to/MobileVLM-1.7B_V2 \
     --projector-type ldpv2
 ```

-4. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:
+4. Use `examples/convert_legacy_llama.py` to convert the LLaMA part of LLaVA to GGUF:

 ```sh
-python ./examples/convert-legacy-llama.py path/to/MobileVLM-1.7B
+python ./examples/convert_legacy_llama.py path/to/MobileVLM-1.7B
 ```

 5. Use `quantize` to convert LLaMA part's DataType from `fp16` to `q4_k`

@ -38,22 +38,22 @@ git clone https://huggingface.co/openai/clip-vit-large-patch14-336
 pip install -r examples/llava/requirements.txt
 ```

-3. Use `llava-surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:
+3. Use `llava_surgery.py` to split the LLaVA model to LLaMA and multimodel projector constituents:

 ```sh
-python ./examples/llava/llava-surgery.py -m ../llava-v1.5-7b
+python ./examples/llava/llava_surgery.py -m ../llava-v1.5-7b
 ```

-4. Use `convert-image-encoder-to-gguf.py` to convert the LLaVA image encoder to GGUF:
+4. Use `convert_image_encoder_to_gguf.py` to convert the LLaVA image encoder to GGUF:

 ```sh
-python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
+python ./examples/llava/convert_image_encoder_to_gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
 ```

-5. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:
+5. Use `examples/convert_legacy_llama.py` to convert the LLaMA part of LLaVA to GGUF:

 ```sh
-python ./examples/convert-legacy-llama.py ../llava-v1.5-7b --skip-unknown
+python ./examples/convert_legacy_llama.py ../llava-v1.5-7b --skip-unknown
 ```

 Now both the LLaMA part and the image encoder are in the `llava-v1.5-7b` directory.

@ -70,9 +70,9 @@ git clone https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b
 pip install -r examples/llava/requirements.txt
 ```

-3) Use `llava-surgery-v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
+3) Use `llava_surgery_v2.py` which also supports llava-1.5 variants pytorch as well as safetensor models:
 ```console
-python examples/llava/llava-surgery-v2.py -C -m ../llava-v1.6-vicuna-7b/
+python examples/llava/llava_surgery_v2.py -C -m ../llava-v1.6-vicuna-7b/
 ```
 - you will find a llava.projector and a llava.clip file in your model directory

@ -86,13 +86,13 @@ curl -s -q https://huggingface.co/cmp-nct/llava-1.6-gguf/raw/main/config_vit.jso

 5) Create the visual gguf model:
 ```console
-python ./examples/llava/convert-image-encoder-to-gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
+python ./examples/llava/convert_image_encoder_to_gguf.py -m vit --llava-projector vit/llava.projector --output-dir vit --clip-model-is-vision
 ```
 - This is similar to llava-1.5, the difference is that we tell the encoder that we are working with the pure vision model part of CLIP

 6) Then convert the model to gguf format:
 ```console
-python ./examples/convert-legacy-llama.py ../llava-v1.6-vicuna-7b/ --skip-unknown
+python ./examples/convert_legacy_llama.py ../llava-v1.6-vicuna-7b/ --skip-unknown
 ```

 7) And finally we can run the llava cli using the 1.6 model version:
@ -1,3 +1,4 @@
--r ../../requirements/requirements-convert-legacy-llama.txt
+-r ../../requirements/requirements-convert_legacy_llama.txt
+--extra-index-url https://download.pytorch.org/whl/cpu
 pillow~=10.2.0
-torch~=2.1.1
+torch~=2.2.1

@ -10,4 +10,3 @@ More info:

 https://github.com/ggerganov/llama.cpp/pull/4484
 https://github.com/ggerganov/llama.cpp/issues/4226
-

1 examples/main-cmake-pkg/.gitignore vendored
@ -48,4 +48,3 @@
 build*/
 out/
 tmp/
-

@ -30,4 +30,3 @@ target_include_directories(${TARGET} PRIVATE ${_common_path})
 install(TARGETS ${TARGET} RUNTIME)
 target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
-
@ -37,7 +37,8 @@ static gpt_params * g_params;
 static std::vector<llama_token> * g_input_tokens;
 static std::ostringstream * g_output_ss;
 static std::vector<llama_token> * g_output_tokens;
 static bool is_interacting = false;
+static bool need_insert_eot = false;

 static bool file_exists(const std::string & path) {
     std::ifstream f(path.c_str());

@ -99,7 +100,8 @@ static void write_logfile(
 static void sigint_handler(int signo) {
     if (signo == SIGINT) {
         if (!is_interacting && g_params->interactive) {
             is_interacting = true;
+            need_insert_eot = true;
         } else {
             console::cleanup();
             printf("\n");

@ -224,7 +226,14 @@ int main(int argc, char ** argv) {
                 __func__, n_ctx_train, n_ctx);
     }

-    LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
+    // print chat template example in conversation mode
+    if (params.conversation) {
+        if (params.enable_chat_template) {
+            LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
+        } else {
+            LOG_TEE("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
+        }
+    }

     // print system information
     {

@ -255,13 +264,15 @@ int main(int argc, char ** argv) {
     }

     const bool add_bos = llama_should_add_bos_token(model);
-    GGML_ASSERT(llama_add_eos_token(model) != 1);
+    if (!llama_model_has_encoder(model)) {
+        GGML_ASSERT(llama_add_eos_token(model) != 1);
+    }
     LOG("add_bos: %d\n", add_bos);

     std::vector<llama_token> embd_inp;

     {
-        auto prompt = params.conversation
+        auto prompt = (params.conversation && params.enable_chat_template && !params.prompt.empty())
             ? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
             : params.prompt;
         if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {

@ -517,6 +528,24 @@ int main(int argc, char ** argv) {
         exit(1);
     }

+    if (llama_model_has_encoder(model)) {
+        int enc_input_size = embd_inp.size();
+        llama_token * enc_input_buf = embd_inp.data();
+
+        if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size, 0, 0))) {
+            LOG_TEE("%s : failed to eval\n", __func__);
+            return 1;
+        }
+
+        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
+        if (decoder_start_token_id == -1) {
+            decoder_start_token_id = llama_token_bos(model);
+        }
+
+        embd_inp.clear();
+        embd_inp.push_back(decoder_start_token_id);
+    }
+
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
         if (!embd.empty()) {

@ -810,7 +839,9 @@ int main(int argc, char ** argv) {
                     is_antiprompt = true;
                 }

-                chat_add_and_format(model, chat_msgs, "assistant", assistant_ss.str());
+                if (params.enable_chat_template) {
+                    chat_add_and_format(model, chat_msgs, "assistant", assistant_ss.str());
+                }
                 is_interacting = true;
                 printf("\n");
             }

@ -872,16 +903,24 @@ int main(int argc, char ** argv) {
                     string_process_escapes(buffer);
                 }

-                std::string user_inp = params.conversation
+                bool format_chat = params.conversation && params.enable_chat_template;
+                std::string user_inp = format_chat
                     ? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
                     : std::move(buffer);
                 // TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
                 const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-                const auto line_inp = ::llama_tokenize(ctx, user_inp, false, params.conversation);
+                const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
                 const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

                 LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());

+                // if user stop generation mid-way, we must add EOT to finish model's last response
+                if (need_insert_eot && format_chat) {
+                    llama_token eot = llama_token_eot(model);
+                    embd_inp.push_back(eot == -1 ? llama_token_eos(model) : eot);
+                    need_insert_eot = false;
+                }
+
                 embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
                 embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                 embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());
@ -1,5 +1,8 @@
 # llama.cpp/example/passkey

+A passkey retrieval task is an evaluation method used to measure a language
+models ability to recall information from long contexts.
+
 See the following PRs for more info:

 - https://github.com/ggerganov/llama.cpp/pull/3856
@ -1991,6 +1991,12 @@ int main(int argc, char ** argv) {
|
|||||||
params.n_batch = std::min(params.n_batch, n_kv);
|
params.n_batch = std::min(params.n_batch, n_kv);
|
||||||
} else {
|
} else {
|
||||||
params.n_batch = std::min(params.n_batch, params.n_ctx);
|
params.n_batch = std::min(params.n_batch, params.n_ctx);
|
||||||
|
if (params.kl_divergence) {
|
||||||
|
params.n_parallel = 1;
|
||||||
|
} else {
|
||||||
|
// ensure there's at least enough seq_ids for HellaSwag
|
||||||
|
params.n_parallel = std::max(4, params.n_parallel);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (params.ppl_stride > 0) {
|
if (params.ppl_stride > 0) {
|
||||||
@ -2015,9 +2021,6 @@ int main(int argc, char ** argv) {
|
|||||||
llama_model * model;
|
llama_model * model;
|
||||||
llama_context * ctx;
|
llama_context * ctx;
|
||||||
|
|
||||||
// ensure there's at least enough seq_ids for HellaSwag
|
|
||||||
params.n_parallel = std::max(4, params.n_parallel);
|
|
||||||
|
|
||||||
// load the model and apply lora adapter, if any
|
// load the model and apply lora adapter, if any
|
||||||
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||||
if (model == NULL) {
|
if (model == NULL) {
|
||||||
@@ -4,7 +4,89 @@ You can also use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-
 
 Note: It is synced from llama.cpp `main` every 6 hours.
 
-## Llama 2 7B
+Example usage:
 
+```bash
+# obtain the official LLaMA model weights and place them in ./models
+ls ./models
+llama-2-7b tokenizer_checklist.chk tokenizer.model
+# [Optional] for models using BPE tokenizers
+ls ./models
+<folder containing weights and tokenizer json> vocab.json
+# [Optional] for PyTorch .bin models like Mistral-7B
+ls ./models
+<folder containing weights and tokenizer json>
+
+# install Python dependencies
+python3 -m pip install -r requirements.txt
+
+# convert the model to ggml FP16 format
+python3 convert_hf_to_gguf.py models/mymodel/
+
+# quantize the model to 4-bits (using Q4_K_M method)
+./llama-quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
+
+# update the gguf filetype to current version if older version is now unsupported
+./llama-quantize ./models/mymodel/ggml-model-Q4_K_M.gguf ./models/mymodel/ggml-model-Q4_K_M-v2.gguf COPY
+```
+
+Run the quantized model:
+
+```bash
+# start inference on a gguf model
+./llama-cli -m ./models/mymodel/ggml-model-Q4_K_M.gguf -n 128
+```
+
+When running the larger models, make sure you have enough disk space to store all the intermediate files.
+
+## Memory/Disk Requirements
+
+As the models are currently fully loaded into memory, you will need adequate disk space to save them and sufficient RAM to load them. At the moment, memory and disk requirements are the same.
+
+| Model | Original size | Quantized size (Q4_0) |
+|------:|--------------:|----------------------:|
+| 7B | 13 GB | 3.9 GB |
+| 13B | 24 GB | 7.8 GB |
+| 30B | 60 GB | 19.5 GB |
+| 65B | 120 GB | 38.5 GB |
+
+## Quantization
+
+Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
+
+*(outdated)*
+
+| Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 |
+|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|
+| 7B | perplexity | 5.9066 | 6.1565 | 6.0912 | 5.9862 | 5.9481 | 5.9070 |
+| 7B | file size | 13.0G | 3.5G | 3.9G | 4.3G | 4.7G | 6.7G |
+| 7B | ms/tok @ 4th | 127 | 55 | 54 | 76 | 83 | 72 |
+| 7B | ms/tok @ 8th | 122 | 43 | 45 | 52 | 56 | 67 |
+| 7B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 |
+| 13B | perplexity | 5.2543 | 5.3860 | 5.3608 | 5.2856 | 5.2706 | 5.2548 |
+| 13B | file size | 25.0G | 6.8G | 7.6G | 8.3G | 9.1G | 13G |
+| 13B | ms/tok @ 4th | - | 103 | 105 | 148 | 160 | 131 |
+| 13B | ms/tok @ 8th | - | 73 | 82 | 98 | 105 | 128 |
+| 13B | bits/weight | 16.0 | 4.5 | 5.0 | 5.5 | 6.0 | 8.5 |
+
+- [k-quants](https://github.com/ggerganov/llama.cpp/pull/1684)
+- recent k-quants improvements and new i-quants
+  - [#2707](https://github.com/ggerganov/llama.cpp/pull/2707)
+  - [#2807](https://github.com/ggerganov/llama.cpp/pull/2807)
+  - [#4773 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4773)
+  - [#4856 - 2-bit i-quants (inference)](https://github.com/ggerganov/llama.cpp/pull/4856)
+  - [#4861 - importance matrix](https://github.com/ggerganov/llama.cpp/pull/4861)
+  - [#4872 - MoE models](https://github.com/ggerganov/llama.cpp/pull/4872)
+  - [#4897 - 2-bit quantization](https://github.com/ggerganov/llama.cpp/pull/4897)
+  - [#4930 - imatrix for all k-quants](https://github.com/ggerganov/llama.cpp/pull/4930)
+  - [#4951 - imatrix on the GPU](https://github.com/ggerganov/llama.cpp/pull/4957)
+  - [#4969 - imatrix for legacy quants](https://github.com/ggerganov/llama.cpp/pull/4969)
+  - [#4996 - k-qunats tuning](https://github.com/ggerganov/llama.cpp/pull/4996)
+  - [#5060 - Q3_K_XS](https://github.com/ggerganov/llama.cpp/pull/5060)
+  - [#5196 - 3-bit i-quants](https://github.com/ggerganov/llama.cpp/pull/5196)
+  - [quantization tuning](https://github.com/ggerganov/llama.cpp/pull/5320), [another one](https://github.com/ggerganov/llama.cpp/pull/5334), and [another one](https://github.com/ggerganov/llama.cpp/pull/5361)
+
+**Llama 2 7B**
+
 | Quantization | Bits per Weight (BPW) |
 |--------------|-----------------------|
@@ -18,7 +100,8 @@ Note: It is synced from llama.cpp `main` every 6 hours.
 | Q5_K_M | 5.68 |
 | Q6_K | 6.56 |
 
-## Llama 2 13B
+**Llama 2 13B**
+
 Quantization | Bits per Weight (BPW)
 -- | --
 Q2_K | 3.34
@@ -31,7 +114,7 @@ Q5_K_S | 5.51
 Q5_K_M | 5.67
 Q6_K | 6.56
 
-# Llama 2 70B
+**Llama 2 70B**
 
 Quantization | Bits per Weight (BPW)
 -- | --
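Aside: the bits-per-weight figures above map directly onto file size via the worked relation `size_bytes ≈ parameter_count × BPW / 8`. Taking Llama 2 7B at roughly 6.7 billion parameters, F16 gives 6.7e9 × 16 / 8 ≈ 13 GB, matching the table, and Q6_K at 6.56 BPW gives 6.7e9 × 6.56 / 8 ≈ 5.5 GB; the lower-BPW K-quants scale down proportionally. The parameter count here is an approximation used only for the arithmetic.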
@@ -366,7 +366,8 @@ Notice that each `probs` is an array of length `n_probs`.
   "assistant_name": "",
   "user_name": "",
   "default_generation_settings": { ... },
-  "total_slots": 1
+  "total_slots": 1,
+  "chat_template": ""
 }
 ```
 
@@ -374,8 +375,9 @@ Notice that each `probs` is an array of length `n_probs`.
 - `user_name` - the required anti-prompt to generate the prompt in case you have specified a system prompt for all slots.
 - `default_generation_settings` - the default generation settings for the `/completion` endpoint, which has the same fields as the `generation_settings` response object from the `/completion` endpoint.
 - `total_slots` - the total number of slots for process requests (defined by `--parallel` option)
+- `chat_template` - the model's original Jinja2 prompt template
 
-- **POST** `/v1/chat/completions`: OpenAI-compatible Chat Completions API. Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only model with [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, ChatML template will be used.
+- **POST** `/v1/chat/completions`: OpenAI-compatible Chat Completions API. Given a ChatML-formatted json description in `messages`, it returns the predicted completion. Both synchronous and streaming mode are supported, so scripted and interactive applications work fine. While no strong claims of compatibility with OpenAI API spec is being made, in our experience it suffices to support many apps. Only models with a [supported chat template](https://github.com/ggerganov/llama.cpp/wiki/Templates-supported-by-llama_chat_apply_template) can be used optimally with this endpoint. By default, the ChatML template will be used.
 
 *Options:*
 
@@ -2605,7 +2605,7 @@ int main(int argc, char ** argv) {
     // if a custom chat template is not supplied, we will use the one that comes with the model (if any)
     if (params.chat_template.empty()) {
         if (!ctx_server.validate_model_chat_template()) {
-            LOG_ERROR("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
+            LOG_WARNING("The chat template that comes with this model is not yet supported, falling back to chatml. This may cause the model to output suboptimal responses", {});
             params.chat_template = "chatml";
         }
     }
@@ -2967,11 +2967,20 @@ int main(int argc, char ** argv) {
     };
 
     const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
+        std::string template_key = "tokenizer.chat_template", curr_tmpl;
+        int32_t tlen = llama_model_meta_val_str(ctx_server.model, template_key.c_str(), nullptr, 0);
+        if (tlen > 0) {
+            std::vector<char> curr_tmpl_buf(tlen + 1, 0);
+            if (llama_model_meta_val_str(ctx_server.model, template_key.c_str(), curr_tmpl_buf.data(), curr_tmpl_buf.size()) == tlen) {
+                curr_tmpl = std::string(curr_tmpl_buf.data(), tlen);
+            }
+        }
         res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
         json data = {
             { "system_prompt", ctx_server.system_prompt.c_str() },
             { "default_generation_settings", ctx_server.default_generation_settings_for_props },
-            { "total_slots", ctx_server.params.n_parallel }
+            { "total_slots", ctx_server.params.n_parallel },
+            { "chat_template", curr_tmpl.c_str() }
         };
 
         res.set_content(data.dump(), "application/json; charset=utf-8");
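Aside: the `handle_props` change above uses the standard two-call pattern for `llama_model_meta_val_str` — query the value length with a null buffer, then fill a buffer of that size. A small sketch generalizing it to any GGUF metadata key (the wrapper name is illustrative; the API call is the one used in the diff):

```cpp
#include <string>
#include <vector>
#include "llama.h"

// Read a string-valued GGUF metadata entry, or "" if the key is absent.
static std::string get_meta_str(const llama_model * model, const char * key) {
    int32_t n = llama_model_meta_val_str(model, key, nullptr, 0);  // length query
    if (n <= 0) {
        return "";
    }
    std::vector<char> buf(n + 1, 0);
    if (llama_model_meta_val_str(model, key, buf.data(), buf.size()) != n) {
        return "";
    }
    return std::string(buf.data(), n);
}

// e.g. get_meta_str(model, "tokenizer.chat_template") mirrors what handle_props does.
```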
@@ -52,4 +52,3 @@ Feature: Passkey / Self-extend with context shift
 #| TheBloke/Llama-2-7B-GGUF | llama-2-7b.Q2_K.gguf | 4096 | 3 | 16384 | 512 | 4 | 512 | 500 | 300 | 1234 | 5 | 1234 |
 #| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768 | 2 | 16384 | 512 | 4 | 512 | 500 | 100 | 0987 | 5 | 0
 # 987 |
-
@@ -1054,4 +1054,3 @@
 </body>
 
 </html>
-
@@ -1058,4 +1058,3 @@
 </body>
 
 </html>
-
@@ -31,4 +31,3 @@ for i in range(n-1):
 embedding2 = np.array(result[j])
 similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
 print(f"Similarity between {i} and {j}: {similarity:.2f}")
-
@@ -34,4 +34,3 @@ fi
 
 #use multiple GPUs with same max compute units
 #ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
-
@@ -31,4 +31,3 @@ exit /B 0
 :ERROR
 echo comomand error: %errorlevel%
 exit /B %errorlevel%
-
@@ -7,5 +7,3 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"
 
-
 .\build\bin\main.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0
 
-
@@ -30,6 +30,7 @@ static void print_usage_information(const char * argv0, FILE * stream) {
     fprintf(stream, " --stdin read prompt from standard input.\n");
     fprintf(stream, " --no-bos do not ever add a BOS token to the prompt, even if normally the model uses a BOS token.\n");
     fprintf(stream, " --log-disable disable logs. Makes stderr quiet when loading the model.\n");
+    fprintf(stream, " --show-count print the total number of tokens.\n");
 }
 
 static void llama_log_callback_null(ggml_log_level level, const char * text, void * user_data) {
@@ -195,6 +196,7 @@ int main(int raw_argc, char ** raw_argv) {
     bool printing_ids = false;
     bool no_bos = false;
     bool disable_logging = false;
+    bool show_token_count = false;
     const char * model_path = NULL;
     const char * prompt_path = NULL;
     const char * prompt_arg = NULL;
@@ -249,6 +251,9 @@ int main(int raw_argc, char ** raw_argv) {
     else if (arg == "--log-disable") {
         disable_logging = true;
     }
+    else if (arg == "--show-count") {
+        show_token_count = true;
+    }
     else {
         fprintf(stderr, "Error: unknown option '%s'\n", argv[iarg].c_str());
         return 1;
@@ -384,6 +389,9 @@ int main(int raw_argc, char ** raw_argv) {
         printf("]\n");
     }
 
+    if (show_token_count) {
+        printf("Total number of tokens: %ld\n", tokens.size());
+    }
     // silence valgrind
     llama_free(ctx);
     llama_free_model(model);
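Aside: `tokens.size()` is a `size_t`, so the `%ld` format in the added `printf` relies on `size_t` and `long` having the same width on the target platform; the portable spelling would be:

```cpp
printf("Total number of tokens: %zu\n", tokens.size());
```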
@@ -20,11 +20,11 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1718895438,
-        "narHash": "sha256-k3JqJrkdoYwE3fHE6xGDY676AYmyh4U2Zw+0Bwe5DLU=",
+        "lastModified": 1719506693,
+        "narHash": "sha256-C8e9S7RzshSdHB7L+v9I51af1gDM5unhJ2xO1ywxNH8=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "d603719ec6e294f034936c0d0dc06f689d91b6c3",
+        "rev": "b2852eb9365c6de48ffb0dc2c9562591f652242a",
         "type": "github"
       },
       "original": {
@@ -63,4 +63,3 @@ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
 #ifdef __cplusplus
 }
 #endif
-
@@ -486,9 +486,11 @@ if (GGML_SYCL)
     add_compile_options(-I./) #include DPCT
 
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
     if (GGML_SYCL_TARGET STREQUAL "NVIDIA")
         set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda")
+        add_compile_definitions(GGML_SYCL_WARP_SIZE=32)
+    else()
+        add_compile_definitions(GGML_SYCL_WARP_SIZE=16)
     endif()
 
     file(GLOB GGML_HEADERS_SYCL "ggml-sycl/*.hpp")
@@ -1166,9 +1168,12 @@ target_link_libraries(ggml PRIVATE Threads::Threads ${GGML_EXTRA_LIBS})
 
 find_library(MATH_LIBRARY m)
 if (MATH_LIBRARY)
-    target_link_libraries(ggml PRIVATE ${MATH_LIBRARY})
+    if (NOT WIN32 OR NOT GGML_SYCL)
+        target_link_libraries(ggml PRIVATE ${MATH_LIBRARY})
+    endif()
 endif()
 
 if (BUILD_SHARED_LIBS)
     set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
+    target_compile_definitions(ggml PRIVATE GGML_SHARED GGML_BUILD)
 endif()
@@ -106,19 +106,19 @@ typedef sycl::half2 ggml_half2;
 #define QR6_K 2
 
 #define QI2_XXS (QK_K / (4*QR2_XXS))
-#define QR2_XXS 8
+#define QR2_XXS 4
 
 #define QI2_XS (QK_K / (4*QR2_XS))
-#define QR2_XS 8
+#define QR2_XS 4
 
 #define QI2_S (QK_K / (4*QR2_S))
-#define QR2_S 8
+#define QR2_S 4
 
 #define QI3_XXS (QK_K / (4*QR3_XXS))
-#define QR3_XXS 8
+#define QR3_XXS 4
 
 #define QI3_XS (QK_K / (4*QR3_XS))
-#define QR3_XS 8
+#define QR3_XS 4
 
 #define QI1_S (QK_K / (4*QR1_S))
 #define QR1_S 8
@@ -130,10 +130,10 @@ typedef sycl::half2 ggml_half2;
 #define QR4_NL 2
 
 #define QI4_XS (QK_K / (4*QR4_XS))
-#define QR4_XS 8
+#define QR4_XS 2
 
 #define QI3_S (QK_K / (4*QR3_S))
-#define QR3_S 8
+#define QR3_S 4
 
 #endif // GGML_COMMON_DECL_CUDA || GGML_COMMON_DECL_HIP
 
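Aside: each QI constant is derived from its QR counterpart as `QI_x = QK_K / (4*QR_x)`, so halving a QR value doubles the corresponding QI value. Assuming the usual `QK_K = 256`, changing `QR2_XXS` from 8 to 4 moves `QI2_XXS` from 256/(4*8) = 8 to 256/(4*4) = 16, and the other QR edits in these hunks scale their QI counterparts the same way.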
@@ -1882,6 +1882,11 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
     bool use_mul_mat_q = ggml_is_quantized(src0->type)
         && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
 
+    // if mmvq is available it's a better choice than dmmv:
+#ifndef GGML_CUDA_FORCE_DMMV
+    use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
+#endif // GGML_CUDA_FORCE_DMMV
+
     bool any_gpus_with_slow_fp16 = false;
 
     if (split) {
@@ -1894,22 +1899,15 @@ static void ggml_cuda_mul_mat(ggml_backend_cuda_context & ctx, const ggml_tensor
         }
 
         const int cc = ggml_cuda_info().devices[id].cc;
-        use_mul_mat_vec_q = use_mul_mat_vec_q && cc >= MIN_CC_DP4A;
         use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
         any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_available(cc);
     }
 } else {
     const int cc = ggml_cuda_info().devices[ctx.device].cc;
-    use_mul_mat_vec_q = use_mul_mat_vec_q && cc >= MIN_CC_DP4A;
     use_mul_mat_q = use_mul_mat_q && ggml_cuda_should_use_mmq(src0->type, cc, src1->ne[1]);
     any_gpus_with_slow_fp16 = any_gpus_with_slow_fp16 || !fast_fp16_available(cc);
 }
 
-// if mmvq is available it's a better choice than dmmv:
-#ifndef GGML_CUDA_FORCE_DMMV
-use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
-#endif // GGML_CUDA_FORCE_DMMV
-
 // debug helpers
 //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
 //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
@@ -2713,27 +2711,40 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_MUL_MAT:
         case GGML_OP_MUL_MAT_ID:
             {
-                struct ggml_tensor * a;
-                struct ggml_tensor * b;
+                struct ggml_tensor * a = op->src[0];
                 if (op->op == GGML_OP_MUL_MAT) {
-                    a = op->src[0];
-                    b = op->src[1];
-                } else {
-                    a = op->src[2];
-                    b = op->src[1];
-                }
-                if (a->ne[3] != b->ne[3]) {
-                    return false;
-                }
-                ggml_type a_type = a->type;
-                if (a_type == GGML_TYPE_IQ2_XXS || a_type == GGML_TYPE_IQ2_XS || a_type == GGML_TYPE_IQ3_XXS ||
-                    a_type == GGML_TYPE_IQ1_S || a_type == GGML_TYPE_IQ4_NL || a_type == GGML_TYPE_IQ3_S ||
-                    a_type == GGML_TYPE_IQ1_M || a_type == GGML_TYPE_IQ2_S || a_type == GGML_TYPE_IQ4_XS) {
-                    if (b->ne[1] == 1 && ggml_nrows(b) > 1) {
+                    struct ggml_tensor * b = op->src[1];
+                    if (a->ne[3] != b->ne[3]) {
                         return false;
                     }
                 }
-                return true;
+                switch (a->type) {
+                    case GGML_TYPE_F32:
+                    case GGML_TYPE_F16:
+                    case GGML_TYPE_Q4_0:
+                    case GGML_TYPE_Q4_1:
+                    case GGML_TYPE_Q5_0:
+                    case GGML_TYPE_Q5_1:
+                    case GGML_TYPE_Q8_0:
+                    case GGML_TYPE_Q2_K:
+                    case GGML_TYPE_Q3_K:
+                    case GGML_TYPE_Q4_K:
+                    case GGML_TYPE_Q5_K:
+                    case GGML_TYPE_Q6_K:
+                    case GGML_TYPE_Q8_K:
+                    case GGML_TYPE_IQ1_M:
+                    case GGML_TYPE_IQ1_S:
+                    case GGML_TYPE_IQ2_S:
+                    case GGML_TYPE_IQ2_XS:
+                    case GGML_TYPE_IQ2_XXS:
+                    case GGML_TYPE_IQ3_S:
+                    case GGML_TYPE_IQ3_XXS:
+                    case GGML_TYPE_IQ4_NL:
+                    case GGML_TYPE_IQ4_XS:
+                        return true;
+                    default:
+                        return false;
+                }
             } break;
         case GGML_OP_GET_ROWS:
             {
@@ -3,6 +3,7 @@
 #include "ggml.h"
 #include "ggml-cuda.h"
 
+#include <cstdint>
 #include <memory>
 
 #if defined(GGML_USE_HIPBLAS)
@@ -226,6 +227,10 @@ typedef float2 dfloat2;
 #define RDNA2
 #endif
 
+#if defined(__gfx1010__) || defined(__gfx1012__)
+#define RDNA1
+#endif
+
 #ifndef __has_builtin
 #define __has_builtin(x) 0
 #endif
@@ -268,30 +273,15 @@ static __device__ __forceinline__ unsigned int __vcmpeq4(unsigned int a, unsigne
     return c;
 }
 
-static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
-#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
-    c = __builtin_amdgcn_sdot4(a, b, c, false);
-#elif defined(RDNA3)
-    c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
-#elif defined(__gfx1010__) || defined(__gfx900__)
-    int tmp1;
-    int tmp2;
-    asm("\n \
-        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \
-        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \
-        v_add3_u32 %0, %1, %2, %0 \n \
-        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \
-        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \
-        v_add3_u32 %0, %1, %2, %0 \n \
-        "
-        : "+v"(c), "=&v"(tmp1), "=&v"(tmp2)
-        : "v"(a), "v"(b)
-    );
-#else
-    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
-    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
-    c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
-#endif
+static __device__ __forceinline__ unsigned int __vcmpne4(unsigned int a, unsigned int b) {
+    const uint8x4_t& va = reinterpret_cast<const uint8x4_t&>(a);
+    const uint8x4_t& vb = reinterpret_cast<const uint8x4_t&>(b);
+    unsigned int c;
+    uint8x4_t& vc = reinterpret_cast<uint8x4_t&>(c);
+#pragma unroll
+    for (int i = 0; i < 4; ++i) {
+        vc[i] = va[i] == vb[i] ? 0x00 : 0xff;
+    }
     return c;
 }
 
@@ -467,8 +457,48 @@ static __device__ __forceinline__ uint32_t __hgt2_mask(const half2 a, const half
 }
 #endif // CUDART_VERSION < 12000
 
+static __device__ __forceinline__ int ggml_cuda_dp4a(const int a, const int b, int c) {
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(__gfx906__) || defined(__gfx908__) || defined(__gfx90a__) || defined(__gfx1030__)
+    c = __builtin_amdgcn_sdot4(a, b, c, false);
+#elif defined(RDNA3)
+    c = __builtin_amdgcn_sudot4( true, a, true, b, c, false);
+#elif defined(__gfx1010__) || defined(__gfx900__)
+    int tmp1;
+    int tmp2;
+    asm("\n \
+        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:BYTE_0 \n \
+        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1 \n \
+        v_add3_u32 %0, %1, %2, %0 \n \
+        v_mul_i32_i24 %1, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_2 src1_sel:BYTE_2 \n \
+        v_mul_i32_i24 %2, sext(%3), sext(%4) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3 src1_sel:BYTE_3 \n \
+        v_add3_u32 %0, %1, %2, %0 \n \
+        "
+        : "+v"(c), "=&v"(tmp1), "=&v"(tmp2)
+        : "v"(a), "v"(b)
+    );
+#else
+    const int8x4_t va = reinterpret_cast<const int8x4_t&>(a);
+    const int8x4_t vb = reinterpret_cast<const int8x4_t&>(b);
+    c += va[0] * vb[0] + va[1] * vb[1] + va[2] * vb[2] + va[3] * vb[3];
+#endif
+    return c;
+
+#else // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+    return __dp4a(a, b, c);
+#else // __CUDA_ARCH__ >= MIN_CC_DP4A
+    const int8_t * a8 = (const int8_t *) &a;
+    const int8_t * b8 = (const int8_t *) &b;
+    return c + a8[0]*b8[0] + a8[1]*b8[1] + a8[2]*b8[2] + a8[3]*b8[3];
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+}
+
 // TODO: move to ggml-common.h
-static const __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
+static constexpr __device__ int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
 
 typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v);
 
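Aside: on every code path above, `ggml_cuda_dp4a` computes the same thing — a 4-way signed 8-bit dot product accumulated into `c`. A host-side C++ sketch of that reference semantics (illustrative only; the device function is the one added in the hunk):

```cpp
#include <cstdint>

// Reference semantics of dp4a: treat a and b as four packed signed 8-bit lanes,
// multiply lane-wise, and add the results to the accumulator c.
static int dp4a_reference(int a, int b, int c) {
    const int8_t * a8 = reinterpret_cast<const int8_t *>(&a);
    const int8_t * b8 = reinterpret_cast<const int8_t *>(&b);
    for (int i = 0; i < 4; ++i) {
        c += int(a8[i]) * int(b8[i]);
    }
    return c;
}
```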
@@ -487,4 +487,3 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
         GGML_ASSERT(false);
     }
 }
-
@@ -54,12 +54,11 @@ typedef float (*vec_dot_KQ_f32_t)(
 template<typename T, int D>
 static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0(
     const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
-#if __CUDA_ARCH__ >= MIN_CC_DP4A
 
     const block_q4_0 * K_q4_0 = (const block_q4_0 *) K_c;
     GGML_UNUSED(Q_v);
 
-    half sum = 0.0f;
+    T sum = 0.0f;
 
 #pragma unroll
     for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
@@ -69,10 +68,10 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0(
         const int iqs4 = k_KQ % QI4_0;
         const int shift = k_KQ & (QI8_1/2);
 
-        const int v = (get_int_from_uint8(K_q4_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+        const int v = (get_int_b2(K_q4_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
         const int u = Q_q8[k_KQ_0/WARP_SIZE];
 
-        const int sumi = __dp4a(v, u, 0);
+        const int sumi = ggml_cuda_dp4a(v, u, 0);
 
 #ifdef FP16_AVAILABLE
         if (std::is_same<T, half>::value) {
@@ -90,19 +89,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0(
     }
 
     return sum;
-#else
-    GGML_UNUSED(K_c);
-    GGML_UNUSED(Q_v);
-    GGML_UNUSED(Q_q8);
-    GGML_UNUSED(Q_ds_v);
-    NO_DEVICE_CODE;
-#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
 }
 
 template<typename T, int D>
 static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1(
     const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
-#if __CUDA_ARCH__ >= MIN_CC_DP4A
 
     const block_q4_1 * K_q4_1 = (const block_q4_1 *) K_c;
     GGML_UNUSED(Q_v);
@@ -117,10 +108,10 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1(
         const int iqs4 = k_KQ % QI4_1;
         const int shift = k_KQ & (QI8_1/2);
 
-        const int v = (get_int_from_uint8_aligned(K_q4_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+        const int v = (get_int_b4(K_q4_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
         const int u = Q_q8[k_KQ_0/WARP_SIZE];
 
-        const int sumi = __dp4a(v, u, 0);
+        const int sumi = ggml_cuda_dp4a(v, u, 0);
 
 #ifdef FP16_AVAILABLE
         if (std::is_same<T, half>::value) {
@@ -142,19 +133,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1(
     }
 
     return sum;
-#else
-    GGML_UNUSED(K_c);
-    GGML_UNUSED(Q_v);
-    GGML_UNUSED(Q_q8);
-    GGML_UNUSED(Q_ds_v);
-    NO_DEVICE_CODE;
-#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
 }
 
 template<typename T, int D>
 static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0(
     const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
-#if __CUDA_ARCH__ >= MIN_CC_DP4A
 
     const block_q5_0 * K_q5_0 = (const block_q5_0 *) K_c;
     GGML_UNUSED(Q_v);
@@ -170,8 +153,8 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0(
         const int iqs8 = k_KQ % QI8_1;
         const int shift = k_KQ & (QI8_1/2);
 
-        int v = (get_int_from_uint8(K_q5_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
-        const int vh = get_int_from_uint8(K_q5_0[ib].qh, 0) >> (iqs8 * QI5_0);
+        int v = (get_int_b2(K_q5_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+        const int vh = get_int_b2(K_q5_0[ib].qh, 0) >> (iqs8 * QI5_0);
         v |= (vh << 4) & 0x00000010; // 0 -> 4
         v |= (vh << 11) & 0x00001000; // 1 -> 12
         v |= (vh << 18) & 0x00100000; // 2 -> 20
@@ -179,7 +162,7 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0(
 
         const int u = Q_q8[k_KQ_0/WARP_SIZE];
 
-        const int sumi = __dp4a(v, u, 0);
+        const int sumi = ggml_cuda_dp4a(v, u, 0);
 
 #ifdef FP16_AVAILABLE
         if (std::is_same<T, half>::value) {
@@ -197,19 +180,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0(
     }
 
     return sum;
-#else
-    GGML_UNUSED(K_c);
-    GGML_UNUSED(Q_v);
-    GGML_UNUSED(Q_q8);
-    GGML_UNUSED(Q_ds_v);
-    NO_DEVICE_CODE;
-#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
 }
 
 template<typename T, int D>
 static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1(
     const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
-#if __CUDA_ARCH__ >= MIN_CC_DP4A
 
     const block_q5_1 * K_q5_1 = (const block_q5_1 *) K_c;
     GGML_UNUSED(Q_v);
@@ -225,8 +200,8 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1(
         const int iqs8 = k_KQ % QI8_1;
         const int shift = k_KQ & (QI8_1/2);
 
-        int v = (get_int_from_uint8(K_q5_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
-        const int vh = get_int_from_uint8(K_q5_1[ib].qh, 0) >> (iqs8 * QI5_1);
+        int v = (get_int_b2(K_q5_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+        const int vh = get_int_b2(K_q5_1[ib].qh, 0) >> (iqs8 * QI5_1);
         v |= (vh << 4) & 0x00000010; // 0 -> 4
         v |= (vh << 11) & 0x00001000; // 1 -> 12
         v |= (vh << 18) & 0x00100000; // 2 -> 20
@@ -234,7 +209,7 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1(
 
         const int u = Q_q8[k_KQ_0/WARP_SIZE];
 
-        const int sumi = __dp4a(v, u, 0);
+        const int sumi = ggml_cuda_dp4a(v, u, 0);
 
 #ifdef FP16_AVAILABLE
         if (std::is_same<T, half>::value) {
@@ -256,19 +231,11 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1(
     }
 
     return sum;
-#else
-    GGML_UNUSED(K_c);
-    GGML_UNUSED(Q_v);
-    GGML_UNUSED(Q_q8);
-    GGML_UNUSED(Q_ds_v);
-    NO_DEVICE_CODE;
-#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
 }
 
 template <typename T, int D>
 static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0(
     const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
-#if __CUDA_ARCH__ >= MIN_CC_DP4A
 
     const block_q8_0 * K_q8_0 = (const block_q8_0 *) K_c;
     GGML_UNUSED(Q_v);
@@ -282,7 +249,7 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0(
         const int ib = k_KQ / QI8_0;
         const int iqs = k_KQ % QI8_0;
 
-        const int v = get_int_from_int8(K_q8_0[ib].qs, iqs);
+        const int v = get_int_b2(K_q8_0[ib].qs, iqs);
 
         T Q_d;
         if (std::is_same<T, half>::value) {
@@ -297,13 +264,6 @@ static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0(
     }
 
     return sum;
-#else
-    GGML_UNUSED(K_c);
-    GGML_UNUSED(Q_v);
-    GGML_UNUSED(Q_q8);
-    GGML_UNUSED(Q_ds_v);
-    NO_DEVICE_CODE;
-#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
 }
 
 template <typename T, int D>
@@ -448,7 +408,7 @@ static __device__ __forceinline__ T dequantize_1_q5_0(const void * __restrict__
 
     const T d = x[ib].d;
     const int ql0 = x[ib].qs[iqs];
-    const int qh0 = get_int_from_uint8(x[ib].qh, 0);
+    const int qh0 = get_int_b2(x[ib].qh, 0);
     const int ql = ((ql0 >> (4*shift)) & 0x0F);
     const int qh = ((qh0 >> idq) << 4) & 0x10;
     const int q = (ql | qh) - 16;
@@ -473,7 +433,7 @@ static __device__ __forceinline__ T dequantize_1_q5_1(const void * __restrict__
 
     const half2 dm = x[ib].dm;
     const int ql0 = x[ib].qs[iqs];
-    const int qh0 = get_int_from_uint8_aligned(x[ib].qh, 0);
+    const int qh0 = get_int_b4(x[ib].qh, 0);
     const int ql = ((ql0 >> (4*shift)) & 0x0F);
     const int qh = ((qh0 >> idq) << 4) & 0x10;
     const int q = (ql | qh);
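Aside: the `get_int_from_uint8*`/`get_int_from_int8` to `get_int_b2`/`get_int_b4` renames in this file (and in the mmq hunks below) are mechanical; the suffix encodes the alignment the source buffer guarantees. A hedged sketch of what such helpers plausibly look like — the actual definitions live in the CUDA common header and may differ in detail:

```cpp
#include <cstdint>
#include <cstring>

// Plausible shape of the helpers (assumption): read the idx-th 32-bit word from a
// buffer that is only 2-byte aligned (b2) or fully 4-byte aligned (b4).
static inline int get_int_b2_sketch(const void * data, int idx) {
    const uint16_t * p = static_cast<const uint16_t *>(data);
    int v = 0;
    std::memcpy(&v, p + 2*idx, sizeof(v));        // avoid an unaligned 32-bit load
    return v;
}

static inline int get_int_b4_sketch(const void * data, int idx) {
    return static_cast<const int *>(data)[idx];   // buffer is 4-byte aligned
}
```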
@@ -59,6 +59,12 @@ void ggml_cuda_op_mul_mat_q(
         case GGML_TYPE_Q6_K:
             mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
             break;
+        case GGML_TYPE_IQ4_XS:
+            mul_mat_q_case<GGML_TYPE_IQ4_XS>(ctx, args, stream);
+            break;
+        case GGML_TYPE_IQ4_NL:
+            mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
+            break;
         default:
             GGML_ASSERT(false);
             break;
@@ -87,6 +93,8 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
         case GGML_TYPE_Q4_K:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
+        case GGML_TYPE_IQ4_XS:
+        case GGML_TYPE_IQ4_NL:
             mmq_supported = true;
             break;
         default:
@@ -60,12 +60,16 @@ static constexpr __device__ int get_mmq_x_max_device() {
 }
 
 static constexpr int get_mmq_y_host(const int cc) {
-    return int8_mma_available(cc) || cc >= CC_VOLTA ? 128 : 64;
+    return cc >= CC_OFFSET_AMD ? (cc == CC_RDNA1 ? 64 : 128) : (cc >= CC_VOLTA ? 128 : 64);
 }
 
 static constexpr __device__ int get_mmq_y_device() {
 #if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+#if defined(RDNA1)
+    return 64;
+#else
     return 128;
+#endif // defined RDNA1
 #else
 #if __CUDA_ARCH__ >= CC_VOLTA
     return 128;
@@ -88,15 +92,17 @@ static constexpr __device__ int get_mmq_y_device() {
 
 static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml_type type, int mmq_y) {
     return type == GGML_TYPE_Q4_0 ? MMQ_DP4A_TXS_Q4_0 :
         type == GGML_TYPE_Q4_1 ? MMQ_DP4A_TXS_Q4_1 :
         type == GGML_TYPE_Q5_0 ? MMQ_DP4A_TXS_Q5_0 :
         type == GGML_TYPE_Q5_1 ? MMQ_DP4A_TXS_Q5_1 :
         type == GGML_TYPE_Q8_0 ? MMQ_DP4A_TXS_Q8_0 :
         type == GGML_TYPE_Q2_K ? MMQ_DP4A_TXS_Q2_K :
         type == GGML_TYPE_Q3_K ? MMQ_DP4A_TXS_Q3_K :
         type == GGML_TYPE_Q4_K ? MMQ_DP4A_TXS_Q4_K :
         type == GGML_TYPE_Q5_K ? MMQ_DP4A_TXS_Q5_K :
         type == GGML_TYPE_Q6_K ? MMQ_DP4A_TXS_Q6_K :
+        type == GGML_TYPE_IQ4_XS ? MMQ_DP4A_TXS_Q5_0 :
+        type == GGML_TYPE_IQ4_NL ? MMQ_DP4A_TXS_Q5_0 :
         tile_x_sizes{0, 0, 0};
 }
 
@@ -124,15 +130,17 @@ static_assert(MMQ_MMA_TILE_X_K_Q6_K % 8 == 4, "Wrong padding.");
 
 static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
     return type == GGML_TYPE_Q4_0 ? MMQ_MMA_TILE_X_K_Q4_0 :
         type == GGML_TYPE_Q4_1 ? MMQ_MMA_TILE_X_K_Q4_1 :
         type == GGML_TYPE_Q5_0 ? MMQ_MMA_TILE_X_K_Q5_0 :
         type == GGML_TYPE_Q5_1 ? MMQ_MMA_TILE_X_K_Q5_1 :
         type == GGML_TYPE_Q8_0 ? MMQ_MMA_TILE_X_K_Q8_0 :
        type == GGML_TYPE_Q2_K ? MMQ_MMA_TILE_X_K_Q2_K :
         type == GGML_TYPE_Q3_K ? MMQ_MMA_TILE_X_K_Q3_K :
         type == GGML_TYPE_Q4_K ? MMQ_MMA_TILE_X_K_Q4_K :
         type == GGML_TYPE_Q5_K ? MMQ_MMA_TILE_X_K_Q5_K :
         type == GGML_TYPE_Q6_K ? MMQ_MMA_TILE_X_K_Q6_K :
+        type == GGML_TYPE_IQ4_XS ? MMQ_MMA_TILE_X_K_Q5_0 :
+        type == GGML_TYPE_IQ4_NL ? MMQ_MMA_TILE_X_K_Q5_0 :
         0;
 }
 
@@ -181,9 +189,9 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbx;
 
 #ifdef INT8_MMA_AVAILABLE
-    x_qs[i*MMQ_MMA_TILE_X_K_Q4_0 + threadIdx.x] = get_int_from_uint8(bxi->qs, kqsx);
+    x_qs[i*MMQ_MMA_TILE_X_K_Q4_0 + threadIdx.x] = get_int_b2(bxi->qs, kqsx);
 #else
-    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8(bxi->qs, kqsx);
+    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_b2(bxi->qs, kqsx);
 #endif // INT8_MMA_AVAILABLE
     }
 
@@ -344,9 +352,9 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbx;
 
 #ifdef INT8_MMA_AVAILABLE
-    x_qs[i*MMQ_MMA_TILE_X_K_Q4_1 + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+    x_qs[i*MMQ_MMA_TILE_X_K_Q4_1 + threadIdx.x] = get_int_b4(bxi->qs, kqsx);
 #else
-    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_b4(bxi->qs, kqsx);
 #endif // INT8_MMA_AVAILABLE
     }
 
@@ -505,8 +513,8 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
     const block_q5_0 * bxi = (const block_q5_0 *) x + kbx0 + i*stride + kbx;
 
-    const int ql = get_int_from_uint8(bxi->qs, kqsx);
-    const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_0));
+    const int ql = get_int_b2(bxi->qs, kqsx);
+    const int qh = get_int_b2(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_0));
 
     int qs0 = (ql >> 0) & 0x0F0F0F0F;
     qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
@@ -670,8 +678,8 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
     const block_q5_1 * bxi = (const block_q5_1 *) x + kbx0 + i*stride + kbx;
 
-    const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
-    const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_1));
+    const int ql = get_int_b4(bxi->qs, kqsx);
+    const int qh = get_int_b4(bxi->qh, 0) >> (4 * (threadIdx.x % QI5_1));
 
     int qs0 = (ql >> 0) & 0x0F0F0F0F;
     qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
@@ -835,9 +843,9 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbx;
 
 #ifdef INT8_MMA_AVAILABLE
-    x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x] = get_int_from_int8(bxi->qs, kqsx);
+    x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x] = get_int_b2(bxi->qs, kqsx);
 #else
-    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_from_int8(bxi->qs, kqsx);
+    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_b2(bxi->qs, kqsx);
 #endif // INT8_MMA_AVAILABLE
     }
 
@@ -980,7 +988,7 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
     const block_q2_K * bxi = (const block_q2_K *) x + kbx0 + i*stride + kbx;
 
-    const int x_ql_0 = get_int_from_uint8(bxi->qs, kqsx);
+    const int x_ql_0 = get_int_b2(bxi->qs, kqsx);
 
 #pragma unroll
     for (int l = 0; l < QR2_K; ++l) {
@@ -1162,8 +1170,8 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
     const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride + kbx;
 
-    const int x_ql_0 = get_int_from_uint8(bxi->qs, kqsx);
-    const int x_qh_0 = get_int_from_uint8(bxi->hmask, kqsx % (QI3_K/2)) >> (4 * (kqsx / (QI3_K/2)));
+    const int x_ql_0 = get_int_b2(bxi->qs, kqsx);
+    const int x_qh_0 = get_int_b2(bxi->hmask, kqsx % (QI3_K/2)) >> (4 * (kqsx / (QI3_K/2)));
 
 #pragma unroll
     for (int l = 0; l < QR3_K; ++l) {
@@ -1221,11 +1229,11 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
 
     const int ksc_low = ksc % (QI3_K/8);
     const int shift_low = 4 * (ksc / (QI3_K/8));
-    const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
+    const int sc_low = (get_int_b2(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
 
     const int ksc_high = QI3_K/8;
     const int shift_high = 2 * ksc;
-    const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
+    const int sc_high = ((get_int_b2(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
 
     const int sc = __vsubss4(sc_low | sc_high, 0x20202020);
 
@@ -1389,9 +1397,9 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride + kbx;
 
 #ifdef INT8_MMA_AVAILABLE
-    x_qs[i*MMQ_MMA_TILE_X_K_Q4_K + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+    x_qs[i*MMQ_MMA_TILE_X_K_Q4_K + threadIdx.x] = get_int_b4(bxi->qs, kqsx);
 #else
-    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+    x_qs[i*(WARP_SIZE + 1) + threadIdx.x] = get_int_b4(bxi->qs, kqsx);
 #endif // INT8_MMA_AVAILABLE
     }
 
@@ -1606,11 +1614,11 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q5_K * bxi = (const block_q5_K *) x + kbx0 + i*stride + kbx;
     const int ky = QR5_K*kqsx;
 
-    const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
+    const int ql = get_int_b4(bxi->qs, kqsx);
     const int ql0 = (ql >> 0) & 0x0F0F0F0F;
     const int ql1 = (ql >> 4) & 0x0F0F0F0F;
 
-    const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
+    const int qh = get_int_b4(bxi->qh, kqsx % (QI5_K/4));
     const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
     const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
 
@@ -1828,11 +1836,11 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + kbx;
     const int ky = QR6_K*kqsx;
 
-    const int ql = get_int_from_uint8(bxi->ql, kqsx);
+    const int ql = get_int_b2(bxi->ql, kqsx);
     const int ql0 = (ql >> 0) & 0x0F0F0F0F;
     const int ql1 = (ql >> 4) & 0x0F0F0F0F;
 
-    const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
+    const int qh = get_int_b2(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
     const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
     const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
 
@@ -1879,9 +1887,9 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
     const block_q6_K * bxi = (const block_q6_K *) x + kbx0 + i*stride + (threadIdx.x % (WARP_SIZE/8)) / 4;
 
 #ifdef INT8_MMA_AVAILABLE
-    x_sc[i*MMQ_MMA_TILE_X_K_Q6_K + threadIdx.x % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, threadIdx.x % (QI6_K/8));
+    x_sc[i*MMQ_MMA_TILE_X_K_Q6_K + threadIdx.x % (WARP_SIZE/8)] = get_int_b2(bxi->scales, threadIdx.x % (QI6_K/8));
 #else
-    x_sc[i*(WARP_SIZE/8) + i/8 + threadIdx.x % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, threadIdx.x % (QI6_K/8));
+    x_sc[i*(WARP_SIZE/8) + i/8 + threadIdx.x % (WARP_SIZE/8)] = get_int_b2(bxi->scales, threadIdx.x % (QI6_K/8));
 #endif // INT8_MMA_AVAILABLE
     }
 }
@@ -2014,6 +2022,124 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
 #endif // INT8_MMA_AVAILABLE
 }
 
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_nl(
+    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+#ifdef INT8_MMA_AVAILABLE
+    int * x_qs = (int *) x_tile;
+    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_NL, mmq_y);
+    int * x_qs = (int *) x_tile;
+    float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+    const int kbx = threadIdx.x / QI4_NL;
+    const int kqsx = threadIdx.x % QI4_NL;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+        int i = i0 + threadIdx.y;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
+        const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbx;
+
+        const int aux_q4 = get_int_b2(bxi->qs, kqsx);
+        const int2 v = get_int_from_table_16(aux_q4);
+        const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4;
+#ifdef INT8_MMA_AVAILABLE
+        x_qs[i*MMQ_MMA_TILE_X_K_Q5_0 + k0 + 0] = v.x;
+        x_qs[i*MMQ_MMA_TILE_X_K_Q5_0 + k0 + 4] = v.y;
+#else
+        x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x;
+        x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y;
+#endif // INT8_MMA_AVAILABLE
+    }
+
+    const int blocks_per_tile_x_row = WARP_SIZE / QI4_NL;
+    const int kbxd = threadIdx.x % blocks_per_tile_x_row;
+
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_NL) {
+        int i = i0 + threadIdx.y * QI4_NL + threadIdx.x / blocks_per_tile_x_row;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+
const block_iq4_nl * bxi = (const block_iq4_nl *) x + kbx0 + i*stride + kbxd;
|
||||||
|
|
||||||
|
#ifdef INT8_MMA_AVAILABLE
|
||||||
|
x_df[i*MMQ_MMA_TILE_X_K_Q5_0 + kbxd] = __half2float(bxi->d);
|
||||||
|
#else
|
||||||
|
x_df[i*(WARP_SIZE/4) + i/4 + kbxd] = __half2float(bxi->d);
|
||||||
|
#endif // INT8_MMA_AVAILABLE
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_xs(
|
||||||
|
const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
|
||||||
|
|
||||||
|
#ifdef INT8_MMA_AVAILABLE
|
||||||
|
int * x_qs = (int *) x_tile;
|
||||||
|
float * x_df = (float *) (x_qs + WARP_SIZE*2);
|
||||||
|
#else
|
||||||
|
constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
|
||||||
|
int * x_qs = (int *) x_tile;
|
||||||
|
float * x_df = (float *) (x_qs + txs.qs);
|
||||||
|
#endif // INT8_MMA_AVAILABLE
|
||||||
|
|
||||||
|
const int kbx = 0; // threadIdx.x / QI4_XS
|
||||||
|
const int kqsx = threadIdx.x; // threadIdx.x % QI4_XS
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
|
||||||
|
int i = i0 + threadIdx.y;
|
||||||
|
|
||||||
|
if (need_check) {
|
||||||
|
i = min(i, i_max);
|
||||||
|
}
|
||||||
|
|
||||||
|
const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride + kbx;
|
||||||
|
|
||||||
|
const int aux_q4 = get_int_b4(bxi->qs, kqsx);
|
||||||
|
const int2 v = get_int_from_table_16(aux_q4);
|
||||||
|
const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4;
|
||||||
|
#ifdef INT8_MMA_AVAILABLE
|
||||||
|
x_qs[i*MMQ_MMA_TILE_X_K_Q5_0 + k0 + 0] = v.x;
|
||||||
|
x_qs[i*MMQ_MMA_TILE_X_K_Q5_0 + k0 + 4] = v.y;
|
||||||
|
#else
|
||||||
|
x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x;
|
||||||
|
x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y;
|
||||||
|
#endif // INT8_MMA_AVAILABLE
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma unroll
|
||||||
|
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
|
||||||
|
int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
|
||||||
|
|
||||||
|
if (need_check) {
|
||||||
|
i = min(i, i_max);
|
||||||
|
}
|
||||||
|
|
||||||
|
const block_iq4_xs * bxi = (const block_iq4_xs *) x + kbx0 + i*stride;
|
||||||
|
|
||||||
|
const float d = __half2float(bxi->d);
|
||||||
|
|
||||||
|
const int ls = ((bxi->scales_l[(threadIdx.x % 8)/2] >> (4*(threadIdx.x % 2))) & 0x0F)
|
||||||
|
| (((bxi->scales_h >> (2*(threadIdx.x % 8))) & 0x03) << 4);
|
||||||
|
|
||||||
|
#ifdef INT8_MMA_AVAILABLE
|
||||||
|
x_df[i*MMQ_MMA_TILE_X_K_Q5_0 + threadIdx.x % 8] = d * (ls - 32);
|
||||||
|
#else
|
||||||
|
x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = d * (ls - 32);
|
||||||
|
#endif // INT8_MMA_AVAILABLE
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
template<int mmq_x, int mmq_y, int nwarps, bool need_check>
|
template<int mmq_x, int mmq_y, int nwarps, bool need_check>
|
||||||
static __device__ __forceinline__ void mmq_write_back_dp4a(
|
static __device__ __forceinline__ void mmq_write_back_dp4a(
|
||||||
const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max) {
|
const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max) {
|
||||||
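The two loaders added above are the new MMQ tile-loading paths for the nonlinear 4-bit types: an IQ4 nibble is an index into a 16-entry codebook rather than a linearly scaled value, which is why the quants are expanded through get_int_from_table_16 before being written to x_qs. A rough host-side sketch of that nibble-to-codebook expansion follows; the table values are quoted from memory of ggml-common.h and should be treated as an assumption, and the helper name dequant_word_iq4nl is made up for illustration.

// Host-side illustration only: expand one packed 32-bit word of IQ4_NL data
// (8 nibbles) through the 16-entry nonlinear value table and apply the block
// scale. This mirrors what get_int_b2 + get_int_from_table_16 feed into the
// MMQ tile above, but it is not the device code itself.
#include <cstdint>
#include <cstdio>

// Nonlinear 4-bit codebook used by IQ4_NL (values reproduced from memory of
// ggml-common.h; verify against the header before relying on them).
static const int8_t kvalues_iq4nl[16] = {
    -127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
};

// Dequantize the 8 nibbles of one uint32_t: low nibbles index the table for
// the first 4 values, high nibbles for the next 4, each scaled by d.
static void dequant_word_iq4nl(uint32_t packed, float d, float out[8]) {
    for (int i = 0; i < 4; ++i) {
        const uint32_t byte = (packed >> (8*i)) & 0xFF;
        out[i + 0] = d * kvalues_iq4nl[byte & 0x0F];
        out[i + 4] = d * kvalues_iq4nl[byte >> 4];
    }
}

int main() {
    float y[8];
    dequant_word_iq4nl(0x80808080u, 1.0f, y); // nibbles 0x0 and 0x8 -> -127 and 1
    for (float v : y) printf("%g ", v);
    printf("\n");
    return 0;
}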
@@ -2163,6 +2289,22 @@ struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q6_K> {
     static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q6_K_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
 };

+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_NL> {
+    static constexpr int              vdr          = VDR_IQ4_NL_Q8_1_MMQ;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_nl<mmq_y, nwarps, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q5_0_q8_1_mma<mmq_x, mmq_y, nwarps>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q5_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_XS> {
+    static constexpr int              vdr          = VDR_IQ4_XS_Q8_1_MMQ;
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_xs<mmq_y, nwarps, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q5_0_q8_1_mma<mmq_x, mmq_y, nwarps>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q5_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
 static bool mmq_need_sum(const ggml_type type_x) {
     switch (type_x) {
         case GGML_TYPE_Q4_0:
@@ -2180,6 +2322,8 @@ static bool mmq_need_sum(const ggml_type type_x) {
         case GGML_TYPE_Q5_K:
             return true;
         case GGML_TYPE_Q6_K:
+        case GGML_TYPE_IQ4_XS:
+        case GGML_TYPE_IQ4_NL:
             return false;
         default:
             GGML_ASSERT(false);
@@ -2301,8 +2445,11 @@ static __global__ void mul_mat_q(
     const int nty = (ne01 + mmq_y - 1) / mmq_y; // Number of tiles y

     // kbc == k block continuous, current index in continuous ijk space.
-    int64_t kbc = GGML_PAD((int64_t) blockIdx.x *blocks_per_ne00*ntx*nty / gridDim.x, blocks_per_warp);
-    const int64_t kbc_stop = GGML_PAD((int64_t)(blockIdx.x + 1)*blocks_per_ne00*ntx*nty / gridDim.x, blocks_per_warp);
+    int64_t kbc      = (int64_t) blockIdx.x     *blocks_per_ne00*ntx*nty / gridDim.x;
+    int64_t kbc_stop = (int64_t)(blockIdx.x + 1)*blocks_per_ne00*ntx*nty / gridDim.x;

+    kbc      -= (kbc      % blocks_per_ne00) % blocks_per_warp;
+    kbc_stop -= (kbc_stop % blocks_per_ne00) % blocks_per_warp;
+
     // kb0 == k index when doing the matrix multiplication for an output tile.
     int kb0_start = kbc % blocks_per_ne00;
@@ -2358,8 +2505,11 @@ static __global__ void mul_mat_q_stream_k_fixup(
     const int bidx_stop = (blockIdx.y*nty + blockIdx.x + 1) * block_num_mmq / (gridDim.y*gridDim.x) + 1;

     for (int bidx = bidx_start; bidx < bidx_stop; ++bidx) {
-        const int64_t kbc = GGML_PAD((int64_t) bidx *blocks_per_ne00*ntx*nty / block_num_mmq, blocks_per_warp);
-        const int64_t kbc_stop = GGML_PAD((int64_t)(bidx + 1)*blocks_per_ne00*ntx*nty / block_num_mmq, blocks_per_warp);
+        int64_t kbc      = (int64_t) bidx     *blocks_per_ne00*ntx*nty / block_num_mmq;
+        int64_t kbc_stop = (int64_t)(bidx + 1)*blocks_per_ne00*ntx*nty / block_num_mmq;

+        kbc      -= (kbc      % blocks_per_ne00) % blocks_per_warp;
+        kbc_stop -= (kbc_stop % blocks_per_ne00) % blocks_per_warp;
+
         // Skip fixup tile if the MMQ CUDA block never wrote anything to it:
         if (kbc == kbc_stop || kbc_stop % blocks_per_ne00 == 0) {
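Both stream-k hunks above replace the GGML_PAD round-up with an explicit round-down: each CUDA block's [kbc, kbc_stop) range starts as an even split of the continuous block index space, and each boundary is then pulled back so its offset within the current blocks_per_ne00 slice is a multiple of blocks_per_warp. The fixup kernel repeats the same correction so that both kernels agree on the tile boundaries. A small host-side sketch of that arithmetic follows; the example sizes are made up and the function name stream_k_boundary is purely illustrative.

// Sketch of the boundary computation used above (host-side, illustrative).
// Names mirror the kernel arguments; the sizes in main() are invented.
#include <cstdint>
#include <cstdio>

int64_t stream_k_boundary(int64_t bidx, int64_t total_blocks, int64_t grid,
                          int64_t blocks_per_ne00, int64_t blocks_per_warp) {
    // Even split of the continuous ijk block index space across CUDA blocks...
    int64_t kbc = bidx * total_blocks / grid;
    // ...then round down so the offset inside the current ne00 slice is a
    // multiple of blocks_per_warp (the old code rounded up with GGML_PAD).
    kbc -= (kbc % blocks_per_ne00) % blocks_per_warp;
    return kbc;
}

int main() {
    const int64_t total = 7*5*12, grid = 8, per_ne00 = 12, per_warp = 4;
    for (int64_t b = 0; b <= grid; ++b) {
        printf("block %lld: kbc = %lld\n", (long long) b,
               (long long) stream_k_boundary(b, total, grid, per_ne00, per_warp));
    }
    return 0;
}

Because consecutive blocks derive kbc and kbc_stop from the same formula, one block's kbc_stop equals the next block's kbc, so the partition stays gap-free after the rounding.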
@@ -2598,6 +2748,8 @@ extern DECL_MMQ_CASE(GGML_TYPE_Q3_K);
 extern DECL_MMQ_CASE(GGML_TYPE_Q4_K);
 extern DECL_MMQ_CASE(GGML_TYPE_Q5_K);
 extern DECL_MMQ_CASE(GGML_TYPE_Q6_K);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ4_NL);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ4_XS);

 // -------------------------------------------------------------------------------------------------------------------------

@@ -28,16 +28,22 @@ static constexpr __device__ vec_dot_q_cuda_t get_vec_dot_q_cuda(ggml_type type)

 static constexpr __device__ int get_vdr_mmvq(ggml_type type) {
     return type == GGML_TYPE_Q4_0 ? VDR_Q4_0_Q8_1_MMVQ :
         type == GGML_TYPE_Q4_1 ? VDR_Q4_1_Q8_1_MMVQ :
         type == GGML_TYPE_Q5_0 ? VDR_Q5_0_Q8_1_MMVQ :
         type == GGML_TYPE_Q5_1 ? VDR_Q5_1_Q8_1_MMVQ :
         type == GGML_TYPE_Q8_0 ? VDR_Q8_0_Q8_1_MMVQ :
         type == GGML_TYPE_Q2_K ? VDR_Q2_K_Q8_1_MMVQ :
         type == GGML_TYPE_Q3_K ? VDR_Q3_K_Q8_1_MMVQ :
         type == GGML_TYPE_Q4_K ? VDR_Q4_K_Q8_1_MMVQ :
         type == GGML_TYPE_Q5_K ? VDR_Q5_K_Q8_1_MMVQ :
         type == GGML_TYPE_Q6_K ? VDR_Q6_K_Q8_1_MMVQ :
-        type == GGML_TYPE_IQ4_NL ? VDR_Q4_K_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ2_XXS ? VDR_IQ2_XXS_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ2_XS ? VDR_IQ2_XS_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ2_S ? VDR_IQ2_S_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ3_XXS ? VDR_IQ3_XXS_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ3_S ? VDR_IQ3_S_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ4_NL ? VDR_IQ4_NL_Q8_1_MMVQ :
+        type == GGML_TYPE_IQ4_XS ? VDR_IQ4_XS_Q8_1_MMVQ :
         1;
 }

@@ -22,7 +22,8 @@ SOURCE_FATTN_WMMA_CASE = "DECL_FATTN_WMMA_F16_CASE({head_size}, {cols_per_block}

 TYPES_MMQ = [
     "GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0",
-    "GGML_TYPE_Q2_K", "GGML_TYPE_Q3_K", "GGML_TYPE_Q4_K", "GGML_TYPE_Q5_K", "GGML_TYPE_Q6_K"
+    "GGML_TYPE_Q2_K", "GGML_TYPE_Q3_K", "GGML_TYPE_Q4_K", "GGML_TYPE_Q5_K", "GGML_TYPE_Q6_K",
+    "GGML_TYPE_IQ4_NL", "GGML_TYPE_IQ4_XS"
 ]

 SOURCE_MMQ = """// This file has been autogenerated by generate_cu_files.py, do not edit manually.
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../mmq.cuh"
+
+DECL_MMQ_CASE(GGML_TYPE_IQ4_NL);
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../mmq.cuh"
+
+DECL_MMQ_CASE(GGML_TYPE_IQ4_XS);
File diff suppressed because it is too large
@@ -6537,4 +6537,3 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t
 template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
 template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
-
@@ -130,4 +130,3 @@ void iq3xs_free_impl(int grid_size);
 #ifdef __cplusplus
 }
 #endif
-
File diff suppressed because it is too large
@@ -19,5 +19,8 @@
 #include "dmmv.hpp"
 #include "mmq.hpp"
 #include "mmvq.hpp"
+#include "rope.hpp"
+#include "norm.hpp"
+#include "softmax.hpp"

 #endif // GGML_SYCL_BACKEND_HPP
@@ -47,10 +47,6 @@ static int g_ggml_sycl_debug = 0;
     } \
   }()

-// #define DEBUG_SYCL_MALLOC
-
-static int g_work_group_size = 0;
-// typedef sycl::half ggml_fp16_t;

 #define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP
 #define VER_4VEC 610 // todo for hardward optimize.
@@ -193,6 +189,8 @@ struct ggml_sycl_device_info {
     sycl_device_info devices[GGML_SYCL_MAX_DEVICES] = {};

     std::array<float, GGML_SYCL_MAX_DEVICES> default_tensor_split = {};
+
+    int max_work_group_sizes[GGML_SYCL_MAX_DEVICES] = {0};
 };

 const ggml_sycl_device_info & ggml_sycl_info();
@@ -295,5 +293,57 @@ struct ggml_backend_sycl_context {
     }
 };

+// common device functions
+
+static __dpct_inline__ float warp_reduce_sum(float x,
+    const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+    for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+        /*
+        DPCT1096:98: The right-most dimension of the work-group used in the SYCL
+        kernel that calls this function may be less than "32". The function
+        "dpct::permute_sub_group_by_xor" may return an unexpected result on the
+        CPU device. Modify the size of the work-group to ensure that the value
+        of the right-most dimension is a multiple of "32".
+        */
+        x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask);
+    }
+    return x;
+}
+
+static __dpct_inline__ sycl::float2
+warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+    for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+        a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(),
+                                                mask);
+        a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(),
+                                                mask);
+    }
+    return a;
+}
+
+static __dpct_inline__ float warp_reduce_max(float x,
+    const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+    for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+        /*
+        DPCT1096:97: The right-most dimension of the work-group used in the SYCL
+        kernel that calls this function may be less than "32". The function
+        "dpct::permute_sub_group_by_xor" may return an unexpected result on the
+        CPU device. Modify the size of the work-group to ensure that the value
+        of the right-most dimension is a multiple of "32".
+        */
+        x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
+                              item_ct1.get_sub_group(), x, mask));
+    }
+    return x;
+}
+
+// Helper for vec loading aligned data
+template <typename Tp, int n>
+inline sycl::vec<Tp, n> vec_aligned_load(const Tp* aligned_ptr) {
+    return *reinterpret_cast<const sycl::vec<Tp, n>*>(aligned_ptr);
+}
+
 #endif // GGML_SYCL_COMMON_HPP
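The warp_reduce_sum and warp_reduce_max helpers added above use an XOR-butterfly: at each of the log2(WARP_SIZE) steps a lane exchanges its value with the lane whose index differs in exactly one bit, so after the loop every lane holds the reduction over the whole sub-group. A plain C++ simulation of that access pattern follows (32 lanes assumed, with an array standing in for dpct::permute_sub_group_by_xor); it is an illustration, not the backend code.

// Plain C++ simulation of the XOR-butterfly reduction pattern used by the
// warp_reduce_sum/warp_reduce_max helpers above (32 lanes assumed).
#include <cstdio>

int main() {
    const int WARP = 32;
    float lane[WARP];
    for (int i = 0; i < WARP; ++i) lane[i] = (float) i;   // lane i starts with value i

    for (int mask = WARP / 2; mask > 0; mask >>= 1) {
        float shuffled[WARP];
        for (int i = 0; i < WARP; ++i) shuffled[i] = lane[i ^ mask]; // value from the partner lane
        for (int i = 0; i < WARP; ++i) lane[i] += shuffled[i];
    }

    // Every lane now holds the full sum 0 + 1 + ... + 31 = 496.
    printf("lane 0 = %g, lane 31 = %g\n", lane[0], lane[WARP - 1]);
    return 0;
}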
@@ -152,12 +152,15 @@ static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int k,
         dpct::has_capability_or_fail(stream->get_device(),
                                      {sycl::aspect::fp16});

-        stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+        stream->submit([&](sycl::handler &cgh) {
+            sycl::local_accessor<uint8_t, 1> scale_local_acc(sycl::range<1>(12), cgh);
+            cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
                                                    sycl::range<3>(1, 1, 32),
                                                sycl::range<3>(1, 1, 32)),
                              [=](sycl::nd_item<3> item_ct1) {
-                                 dequantize_block_q4_K(vx, y, item_ct1);
+                                 dequantize_block_q4_K(vx, y, scale_local_acc.get_pointer(), item_ct1);
                              });
+        });
     }
 }

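The launch above is rewritten from a bare parallel_for into stream->submit with a handler because the 12-byte scratch for the block scales is a sycl::local_accessor, and local accessors can only be constructed from the command-group handler before being captured by the kernel lambda. A minimal, self-contained sketch of that submit + local_accessor pattern follows; it is a toy kernel written against standard SYCL 2020, not the ggml code.

// Minimal SYCL 2020 sketch of the submit + local_accessor pattern introduced
// above: work-group-local scratch is created from the handler, then captured
// by the kernel lambda. Standalone toy example only.
#include <sycl/sycl.hpp>
#include <cstdio>

int main() {
    sycl::queue q;
    int result = 0;
    {
        sycl::buffer<int, 1> out(&result, sycl::range<1>(1));
        q.submit([&](sycl::handler & cgh) {
            sycl::local_accessor<int, 1> scratch(sycl::range<1>(12), cgh); // 12 ints of local memory
            sycl::accessor acc(out, cgh, sycl::write_only);
            cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(12), sycl::range<1>(12)),
                             [=](sycl::nd_item<1> it) {
                const int tid = it.get_local_id(0);
                scratch[tid] = tid;                                   // stage into local memory
                it.barrier(sycl::access::fence_space::local_space);   // same barrier style as the diff
                if (tid == 0) {
                    int sum = 0;
                    for (int i = 0; i < 12; ++i) sum += scratch[i];
                    acc[0] = sum;                                     // 0 + 1 + ... + 11 = 66
                }
            });
        });
    } // buffer destructor copies the result back to 'result'
    printf("sum = %d\n", result);
    return 0;
}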
@@ -293,7 +293,8 @@ static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restri
 #if QK_K == 256
 static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
     if (j < 4) {
-        d = q[j] & 63; m = q[j + 4] & 63;
+        d = q[j] & 63;
+        m = q[j + 4] & 63;
     } else {
         d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
         m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
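For reference, the helper reformatted above unpacks q4_K's packed scales: eight 6-bit (scale, min) pairs are stored in 12 bytes, with pairs 0-3 taking the low 6 bits of bytes 0-3 and 4-7, and pairs 4-7 reassembled from a nibble of bytes 8-11 plus the two spare high bits of earlier bytes. A host-side transcription with a worked bit-level example follows; it simply mirrors the code in the hunk and is illustrative only (the _ref suffix is made up).

// Host-side transcription of get_scale_min_k4 from the hunk above, to make
// the 6-bit packing concrete. Illustrative only.
#include <cstdint>
#include <cstdio>

static void get_scale_min_k4_ref(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
    if (j < 4) {
        d = q[j] & 63;
        m = q[j + 4] & 63;
    } else {
        d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
    }
}

int main() {
    // Example for pair j = 4: byte 8 supplies the nibbles, and the high bits
    // of bytes 0 and 4 supply the top two bits of the scale and min.
    uint8_t q[12] = {0xC1, 2, 3, 4, 0x45, 6, 7, 8, 0xAB, 10, 11, 12};
    uint8_t d, m;
    get_scale_min_k4_ref(4, q, d, m);
    // d = (0xAB & 0xF) | ((0xC1 >> 6) << 4) = 0x0B | 0x30 = 59
    // m = (0xAB >> 4)  | ((0x45 >> 6) << 4) = 0x0A | 0x10 = 26
    printf("d = %u, m = %u\n", d, m);
    return 0;
}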
@@ -303,7 +304,7 @@ static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8

 template<typename dst_t>
 static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
-                                  const sycl::nd_item<3> &item_ct1) {
+                                  uint8_t* scales_local, const sycl::nd_item<3> &item_ct1) {
     const block_q4_K * x = (const block_q4_K *) vx;

     const int i = item_ct1.get_group(2);
@@ -318,19 +319,26 @@ static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restri

     dst_t * y = yy + i*QK_K + 64*il + n*ir;

-    const float dall = x[i].dm[0];
-    const float dmin = x[i].dm[1];
+    const sycl::half2 dm = x[i].dm;
+    const float dall = dm[0];
+    const float dmin = dm[1];

-    const uint8_t * q = x[i].qs + 32*il + n*ir;
+    if (tid < 12)
+        scales_local[tid] = x[i].scales[tid];
+    item_ct1.barrier(sycl::access::fence_space::local_space);

     uint8_t sc, m;
-    get_scale_min_k4(is + 0, x[i].scales, sc, m);
-    const float d1 = dall * sc; const float m1 = dmin * m;
-    get_scale_min_k4(is + 1, x[i].scales, sc, m);
-    const float d2 = dall * sc; const float m2 = dmin * m;
+    get_scale_min_k4(is + 0, scales_local, sc, m);
+    const float d1 = dall * sc;
+    const float m1 = dmin * m;
+    get_scale_min_k4(is + 1, scales_local, sc, m);
+    const float d2 = dall * sc;
+    const float m2 = dmin * m;
+
+    sycl::vec<uint8_t, n> q_vec = vec_aligned_load<uint8_t, n>(x[i].qs + 32*il + n*ir);
     for (int l = 0; l < n; ++l) {
-        y[l + 0] = d1 * (q[l] & 0xF) - m1;
-        y[l +32] = d2 * (q[l] >> 4) - m2;
+        y[l + 0] = d1 * (q_vec[l] & 0xF) - m1;
+        y[l +32] = d2 * (q_vec[l] >> 4) - m2;
     }
 #else
     const int tid = item_ct1.get_local_id(2);
@@ -3,6 +3,7 @@
 #include "dequantize.hpp"
 #include "presets.hpp"

+
 static void convert_f16(const void * vx, const int ib, const int iqs, dfloat2 & v){
     const sycl::half *x = (const sycl::half *)vx;

@@ -76,7 +77,7 @@ static void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat *

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -104,7 +105,7 @@ static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols,
                                                       nrows, item_ct1);
         });
@@ -227,7 +228,7 @@ static void dequantize_mul_mat_vec_q2_k(const void *__restrict__ vx,

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -346,7 +347,7 @@ static void dequantize_mul_mat_vec_q3_k(const void *__restrict__ vx,

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -499,7 +500,7 @@ static void dequantize_mul_mat_vec_q4_k(const void *__restrict__ vx,

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -633,7 +634,7 @@ static void dequantize_mul_mat_vec_q5_k(const void *__restrict__ vx,

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -748,7 +749,7 @@ static void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const floa

     // sum up partial sums and write back result
 #pragma unroll
-    for (int mask = 16; mask > 0; mask >>= 1) {
+    for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
         tmp +=
             dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
     }
@@ -774,7 +775,7 @@ static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(
                 vx, y, dst, ncols, nrows, item_ct1);
         });
@@ -795,7 +796,7 @@ static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(
                 vx, y, dst, ncols, nrows, item_ct1);
         });
@@ -816,7 +817,7 @@ static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(
                 vx, y, dst, ncols, nrows, item_ct1);
         });
@@ -837,7 +838,7 @@ static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(
                 vx, y, dst, ncols, nrows, item_ct1);
         });
@@ -858,7 +859,7 @@ static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y,

     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(WARP_SIZE)]] {
             dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(
                 vx, y, dst, ncols, nrows, item_ct1);
         });
@@ -873,10 +874,10 @@ static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y,
     const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
     const int block_num_y = (nrows + ny - 1) / ny;
     const sycl::range<3> block_nums(1, 1, block_num_y);
-    const sycl::range<3> block_dims(1, ny, 32);
+    const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] {
             dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1);
         });
 }
@@ -889,10 +890,10 @@ static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y,
     const int ny = 2 / K_QUANTS_PER_ITERATION;
     const int block_num_y = (nrows + ny - 1) / ny;
     const sycl::range<3> block_nums(1, 1, block_num_y);
-    const sycl::range<3> block_dims(1, ny, 32);
+    const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] {
             dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1);
         });
 }
@@ -905,10 +906,10 @@ static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y,
     const int ny = 2 / K_QUANTS_PER_ITERATION;
     const int block_num_y = (nrows + ny - 1) / ny;
     const sycl::range<3> block_nums(1, 1, block_num_y);
-    const sycl::range<3> block_dims(1, ny, 32);
+    const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] {
             dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1);
         });
 }
@@ -918,10 +919,10 @@ static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y,
                                              const int nrows,
                                              dpct::queue_ptr stream) {
     GGML_ASSERT(ncols % QK_K == 0);
-    const sycl::range<3> block_dims(1, 1, 32);
+    const sycl::range<3> block_dims(1, 1, QK_WARP_SIZE);
     stream->parallel_for(
         sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] {
             dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1);
         });
 }
@@ -934,10 +935,10 @@ static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y,
     const int ny = 2 / K_QUANTS_PER_ITERATION;
     const int block_num_y = (nrows + ny - 1) / ny;
     const sycl::range<3> block_nums(1, 1, block_num_y);
-    const sycl::range<3> block_dims(1, ny, 32);
+    const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
     stream->parallel_for(
         sycl::nd_range<3>(block_nums * block_dims, block_dims),
-        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
+        [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(QK_WARP_SIZE)]] {
             dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1);
         });
 }
@@ -255,7 +255,7 @@ namespace dpct
        void set_pitch(size_t pitch) { _pitch = pitch; }

        size_t get_x() { return _x; }
-       void set_x(size_t x) { _x = x; };
+       void set_x(size_t x) { _x = x; }

        size_t get_y() { return _y; }
        void set_y(size_t y) { _y = y; }
@@ -1056,7 +1056,7 @@ namespace dpct
 #error "Only support Windows and Linux."
 #endif
            next_free = mapped_address_space;
-       };
+       }

    public:
        using buffer_id_t = int;
@@ -1077,7 +1077,7 @@ namespace dpct
 #else
 #error "Only support Windows and Linux."
 #endif
-       };
+       }

        mem_mgr(const mem_mgr &) = delete;
        mem_mgr &operator=(const mem_mgr &) = delete;
@@ -2426,6 +2426,7 @@ namespace dpct
                    b, ldb, beta, c, ldc, batch_size);
                break;
            }
+#endif
            case detail::get_type_combination_id(
                library_data_t::real_int8, library_data_t::real_int8,
                library_data_t::real_int32, library_data_t::real_int32):
@@ -2458,7 +2459,6 @@ namespace dpct
                    batch_size);
                break;
            }
-#endif
            case detail::get_type_combination_id(
                library_data_t::real_half, library_data_t::real_half,
                library_data_t::real_half, library_data_t::real_float):
@@ -2595,6 +2595,7 @@ namespace dpct
                    stride_c, batch_size);
                break;
            }
+#endif
            case detail::get_type_combination_id(
                library_data_t::real_int8, library_data_t::real_int8,
                library_data_t::real_int32, library_data_t::real_int32):
@@ -2623,7 +2624,6 @@ namespace dpct
                    beta, c, ldc, stride_c, batch_size);
                break;
            }
-#endif
            case detail::get_type_combination_id(
                library_data_t::real_half, library_data_t::real_half,
                library_data_t::real_half, library_data_t::real_float):
Some files were not shown because too many files have changed in this diff.