Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 19:34:35 +00:00)
Merge branch 'master' into xsn/fix_lora
This commit is contained in: e68344cb06
@@ -89,6 +89,22 @@ let
     ps.tiktoken
     ps.torchWithoutCuda
     ps.transformers
+
+    # server bench
+    ps.matplotlib
+
+    # server tests
+    ps.openai
+    ps.behave
+    ps.prometheus-client
+
+    # for examples/pydantic-models-to-grammar-examples.py
+    ps.docstring-parser
+    ps.pydantic
+
+    # for scripts/compare-llama-bench.py
+    ps.gitpython
+    ps.tabulate
   ]
 );
.github/labeler.yml (4 changes)

@@ -16,7 +16,9 @@ SYCL:
       - any-glob-to-any-file:
           - ggml/include/ggml-sycl.h
           - ggml/src/ggml-sycl.cpp
-          - README-sycl.md
+          - ggml/src/ggml-sycl/**
+          - docs/backend/SYCL.md
+          - examples/sycl/**
 Nvidia GPU:
   - changed-files:
       - any-glob-to-any-file:
.github/workflows/python-type-check.yml (new file, 38 lines)

@@ -0,0 +1,38 @@
+name: Python Type-Check
+
+on:
+  push:
+    paths:
+      - '.github/workflows/python-type-check.yml'
+      - '**.py'
+      - '**/requirements*.txt'
+  pull_request:
+    paths:
+      - '.github/workflows/python-type-check.yml'
+      - '**.py'
+      - '**/requirements*.txt'
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
+  cancel-in-progress: true
+
+jobs:
+  python-type-check:
+    runs-on: ubuntu-latest
+    name: pyright type-check
+    steps:
+      - name: Check out source repository
+        uses: actions/checkout@v4
+      - name: Set up Python environment
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.11"
+      - name: Install Python dependencies
+        # TODO: use a venv
+        run: pip install -r requirements/requirements-all.txt
+      - name: Type-check with Pyright
+        uses: jakebailey/pyright-action@v2
+        with:
+          version: 1.1.370
+          level: warning
+          warnings: true
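Note: the pinned pyright version (1.1.370), the diagnostic level, and the warning handling above are inputs of the jakebailey/pyright-action step, not pyright defaults. As an assumption for local reference only (not part of the commit), a rough equivalent would be to install the repository's Python requirements plus the matching pyright release from PyPI and run it from the repository root:

    pip install -r requirements/requirements-all.txt
    pip install pyright==1.1.370
    pyright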
@@ -50,9 +50,6 @@ endif()
 # option list
 #
 
-# general
-option(LLAMA_CCACHE "llama: use ccache if available" ON)
-
 # debug
 option(LLAMA_ALL_WARNINGS           "llama: enable all compiler warnings"                   ON)
 option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
@@ -77,7 +74,6 @@ option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
 include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
 
 # override ggml options
-set(GGML_CCACHE             ${LLAMA_CCACHE})
 set(GGML_SANITIZE_THREAD    ${LLAMA_SANITIZE_THREAD})
 set(GGML_SANITIZE_ADDRESS   ${LLAMA_SANITIZE_ADDRESS})
 set(GGML_SANITIZE_UNDEFINED ${LLAMA_SANITIZE_UNDEFINED})
@@ -115,7 +111,10 @@ llama_option_depr(WARNING LLAMA_SYCL_F16 GGML_SYCL_F16)
 # build the library
 #
 
-add_subdirectory(ggml)
+if (NOT TARGET ggml)
+    add_subdirectory(ggml)
+    # ... otherwise assume ggml is added by a parent CMakeLists.txt
+endif()
 add_subdirectory(src)
 
 #
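Note: the `if (NOT TARGET ggml)` guard above means that when llama.cpp is included from a parent CMake project that already defines a `ggml` target, the bundled ggml subdirectory is skipped (as the new comment says); only standalone builds still add the bundled ggml themselves.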
Makefile (100 changes)

@@ -64,10 +64,14 @@ TEST_TARGETS = \
 	tests/test-tokenizer-1-spm
 
 # Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned
-LEGACY_TARGETS = main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
+LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
 	simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama \
 	retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm
 
+# Legacy build targets that were renamed in #7809, but we want to build binaries that for them that output a deprecation warning if people try to use them.
+# We don't want to clutter things too much, so we only build replacements for the most commonly used binaries.
+LEGACY_TARGETS_BUILD = main quantize perplexity embedding server finetune
+
 # Deprecation aliases
 ifdef LLAMA_CUBLAS
 $(error LLAMA_CUBLAS is removed. Use GGML_CUDA instead.)
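Note: after this split, LEGACY_TARGETS_CLEAN is what `make clean` removes, while LEGACY_TARGETS_BUILD produces small stub binaries under the old names (built from examples/deprecation-warning/, added later in this commit). Based on that stub's fprintf calls, invoking an old name now just prints a redirect and exits with a failure status; the exact spacing below is illustrative:

    $ ./main
    WARNING: The binary 'main' is deprecated.
    Please use 'llama-cli' instead.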
@@ -193,7 +197,7 @@ ifdef GGML_RPC
 	BUILD_TARGETS += rpc-server
 endif
 
-default: $(BUILD_TARGETS)
+default: $(BUILD_TARGETS) $(LEGACY_TARGETS_BUILD)
 
 test: $(TEST_TARGETS)
 	@failures=0; \
@@ -228,7 +232,7 @@ test: $(TEST_TARGETS)
 	fi
 	@echo 'All tests passed.'
 
-all: $(BUILD_TARGETS) $(TEST_TARGETS)
+all: $(BUILD_TARGETS) $(TEST_TARGETS) $(LEGACY_TARGETS_BUILD)
 
 ifdef RISCV_CROSS_COMPILE
 CC := riscv64-unknown-linux-gnu-gcc
@@ -245,17 +249,22 @@ MK_CFLAGS = -std=c11 -fPIC
 MK_CXXFLAGS = -std=c++11 -fPIC
 MK_NVCCFLAGS = -std=c++11
 
-ifndef LLAMA_NO_CCACHE
+ifdef LLAMA_NO_CCACHE
+GGML_NO_CCACHE := 1
+DEPRECATE_WARNING := 1
+endif
+
+ifndef GGML_NO_CCACHE
 CCACHE := $(shell which ccache)
 ifdef CCACHE
 export CCACHE_SLOPPINESS = time_macros
-$(info I ccache found, compilation results will be cached. Disable with LLAMA_NO_CCACHE.)
+$(info I ccache found, compilation results will be cached. Disable with GGML_NO_CCACHE.)
 CC  := $(CCACHE) $(CC)
 CXX := $(CCACHE) $(CXX)
 else
 $(info I ccache not found. Consider installing it for faster compilation.)
 endif # CCACHE
-endif # LLAMA_NO_CCACHE
+endif # GGML_NO_CCACHE
 
 # clock_gettime came in POSIX.1b (1993)
 # CLOCK_MONOTONIC came in POSIX.1-2001 / SUSv3 as optional
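Note: the ccache switch moves from LLAMA_NO_CCACHE to GGML_NO_CCACHE, so disabling the cache now looks like:

    make GGML_NO_CCACHE=1

Setting the old LLAMA_NO_CCACHE still works, but only as a compatibility alias that also sets DEPRECATE_WARNING.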
@@ -545,7 +554,7 @@ endif # GGML_BLIS
 
 ifndef GGML_NO_LLAMAFILE
 	MK_CPPFLAGS += -DGGML_USE_LLAMAFILE
-	OBJ_GGML    += ggml/src/sgemm.o
+	OBJ_GGML    += ggml/src/llamafile/sgemm.o
 endif
 
 ifdef GGML_RPC
@@ -826,7 +835,8 @@ OBJ_GGML += \
 	ggml/src/ggml.o \
 	ggml/src/ggml-alloc.o \
 	ggml/src/ggml-backend.o \
-	ggml/src/ggml-quants.o
+	ggml/src/ggml-quants.o \
+	ggml/src/ggml-aarch64.o
 
 OBJ_LLAMA = \
 	src/llama.o \
@@ -926,6 +936,7 @@ $(info - LLAMA_NO_LLAMAFILE)
 $(info - LLAMA_NO_ACCELERATE)
 $(info - LLAMA_NO_OPENMP)
 $(info - LLAMA_NO_METAL)
+$(info - LLAMA_NO_CCACHE)
 $(info )
 endif
 
@@ -959,15 +970,22 @@ ggml/src/ggml-quants.o: \
 	ggml/src/ggml-common.h
 	$(CC) $(CFLAGS) -c $< -o $@
 
+ggml/src/ggml-aarch64.o: \
+	ggml/src/ggml-aarch64.c \
+	ggml/include/ggml.h \
+	ggml/src/ggml-aarch64.h \
+	ggml/src/ggml-common.h
+	$(CC) $(CFLAGS) -c $< -o $@
+
 ggml/src/ggml-blas.o: \
 	ggml/src/ggml-blas.cpp \
 	ggml/include/ggml-blas.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
 ifndef GGML_NO_LLAMAFILE
-ggml/src/sgemm.o: \
-	ggml/src/sgemm.cpp \
-	ggml/src/sgemm.h \
+ggml/src/llamafile/sgemm.o: \
+	ggml/src/llamafile/sgemm.cpp \
+	ggml/src/llamafile/sgemm.h \
 	ggml/include/ggml.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 endif # GGML_NO_LLAMAFILE
@@ -1092,7 +1110,7 @@ clean:
 	rm -vrf ggml/src/ggml-cuda/template-instances/*.o
 	rm -rvf $(BUILD_TARGETS)
 	rm -rvf $(TEST_TARGETS)
-	rm -rvf $(LEGACY_TARGETS)
+	rm -rvf $(LEGACY_TARGETS_CLEAN)
 	find examples pocs -type f -name "*.o" -delete
 
 #
@@ -1488,3 +1506,61 @@ llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \
 	$(OBJ_GGML)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
+#
+# Deprecated binaries that we want to keep around long enough for people to migrate to the new filenames, then these can be removed.
+#
+# Mark legacy binary targets as .PHONY so that they are always checked.
+.PHONY: main quantize perplexity embedding server finetune
+
+# NOTE: We currently will always build the deprecation-warning `main` and `server` binaries to help users migrate.
+# Eventually we will want to remove these target from building all the time.
+main: examples/deprecation-warning/deprecation-warning.cpp
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead."
+
+server: examples/deprecation-warning/deprecation-warning.cpp
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "NOTICE: The 'server' binary is deprecated. Please use 'llama-server' instead."
+
+quantize: examples/deprecation-warning/deprecation-warning.cpp
+ifneq (,$(wildcard quantize))
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "#########"
+	@echo "WARNING: The 'quantize' binary is deprecated. Please use 'llama-quantize' instead."
+	@echo "  Remove the 'quantize' binary to remove this warning."
+	@echo "#########"
+endif
+
+perplexity: examples/deprecation-warning/deprecation-warning.cpp
+ifneq (,$(wildcard perplexity))
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "#########"
+	@echo "WARNING: The 'perplexity' binary is deprecated. Please use 'llama-perplexity' instead."
+	@echo "  Remove the 'perplexity' binary to remove this warning."
+	@echo "#########"
+endif
+
+embedding: examples/deprecation-warning/deprecation-warning.cpp
+ifneq (,$(wildcard embedding))
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "#########"
+	@echo "WARNING: The 'embedding' binary is deprecated. Please use 'llama-embedding' instead."
+	@echo "  Remove the 'embedding' binary to remove this warning."
+	@echo "#########"
+endif
+
+finetune: examples/deprecation-warning/deprecation-warning.cpp
+ifneq (,$(wildcard finetune))
+	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+	@echo "#########"
+	@echo "WARNING: The 'finetune' binary is deprecated. Please use 'llama-finetune' instead."
+	@echo "  Remove the 'finetune' binary to remove this warning."
+	@echo "#########"
+endif
@@ -10,6 +10,7 @@ var sources = [
     "ggml/src/ggml-alloc.c",
     "ggml/src/ggml-backend.c",
     "ggml/src/ggml-quants.c",
+    "ggml/src/ggml-aarch64.c",
 ]
 
 var resources: [Resource] = []
@@ -96,8 +96,9 @@ Typically finetunes of the base models below are supported as well.
 - [x] [GritLM-7B](https://huggingface.co/GritLM/GritLM-7B) + [GritLM-8x7B](https://huggingface.co/GritLM/GritLM-8x7B)
 - [x] [OLMo](https://allenai.org/olmo)
 - [x] [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) + [Pythia](https://github.com/EleutherAI/pythia)
+- [x] [ChatGLM3-6b](https://huggingface.co/THUDM/chatglm3-6b) + [ChatGLM4-9b](https://huggingface.co/THUDM/glm-4-9b)
 
-(instructions for supporting more models: [HOWTO-add-model.md](./docs/HOWTO-add-model.md))
+(instructions for supporting more models: [HOWTO-add-model.md](./docs/development/HOWTO-add-model.md))
 
 **Multimodal models:**
 
|
|||||||
- [How to build](./docs/build.md)
|
- [How to build](./docs/build.md)
|
||||||
- [Running on Docker](./docs/docker.md)
|
- [Running on Docker](./docs/docker.md)
|
||||||
- [Build on Android](./docs/android.md)
|
- [Build on Android](./docs/android.md)
|
||||||
- [Performance troubleshooting](./docs/token_generation_performance_tips.md)
|
- [Performance troubleshooting](./docs/development/token_generation_performance_tips.md)
|
||||||
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
|
- [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
|
||||||
|
|
||||||
**Seminal papers and background on the models**
|
**Seminal papers and background on the models**
|
||||||
|
@@ -1,3 +1,7 @@
+#if defined(_MSC_VER)
+#define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
+#endif
+
 #include "common.h"
 // Change JSON_ASSERT from assert() to GGML_ASSERT:
 #define JSON_ASSERT GGML_ASSERT
@@ -630,7 +630,7 @@ inline std::string LOG_TOKENS_TOSTR_PRETTY(const C & ctx, const T & tokens)
     buf << "[ ";
 
     bool first = true;
-    for (const auto &token : tokens)
+    for (const auto & token : tokens)
     {
         if (!first) {
             buf << ", ";
@@ -282,8 +282,6 @@ static llama_token llama_sampling_sample_impl(
         GGML_ASSERT(!original_logits.empty());
     }
     llama_token id = 0;
-    // Get a pointer to the logits
-    float * logits = llama_get_logits_ith(ctx_main, idx);
 
     if (temp < 0.0) {
         // greedy sampling, with probs
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (ctx_sampling->grammar != NULL && !is_resampling) {
|
if (ctx_sampling->grammar != NULL && !is_resampling) {
|
||||||
|
// Get a pointer to the logits
|
||||||
|
float * logits = llama_get_logits_ith(ctx_main, idx);
|
||||||
|
|
||||||
// Create an array with a single token data element for the sampled id
|
// Create an array with a single token data element for the sampled id
|
||||||
llama_token_data single_token_data = {id, logits[id], 0.0f};
|
llama_token_data single_token_data = {id, logits[id], 0.0f};
|
||||||
llama_token_data_array single_token_data_array = { &single_token_data, 1, false };
|
llama_token_data_array single_token_data_array = { &single_token_data, 1, false };
|
||||||
@ -377,7 +378,7 @@ static llama_token_data_array llama_sampling_prepare_impl(
|
|||||||
if (ctx_sampling->grammar != NULL && !apply_grammar) {
|
if (ctx_sampling->grammar != NULL && !apply_grammar) {
|
||||||
GGML_ASSERT(original_logits != NULL);
|
GGML_ASSERT(original_logits != NULL);
|
||||||
// Only make a copy of the original logits if we are not applying grammar checks, not sure if I actually have to do this.
|
// Only make a copy of the original logits if we are not applying grammar checks, not sure if I actually have to do this.
|
||||||
*original_logits = {logits, logits + llama_n_vocab(llama_get_model(ctx_main))};
|
*original_logits = {logits, logits + n_vocab};
|
||||||
}
|
}
|
||||||
|
|
||||||
// apply params.logit_bias map
|
// apply params.logit_bias map
|
||||||
@ -390,10 +391,10 @@ static llama_token_data_array llama_sampling_prepare_impl(
|
|||||||
llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale);
|
llama_sample_apply_guidance(ctx_main, logits, logits_guidance, params.cfg_scale);
|
||||||
}
|
}
|
||||||
|
|
||||||
cur.clear();
|
cur.resize(n_vocab);
|
||||||
|
|
||||||
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
|
||||||
cur.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
|
cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
|
||||||
}
|
}
|
||||||
|
|
||||||
llama_token_data_array cur_p = { cur.data(), cur.size(), false };
|
llama_token_data_array cur_p = { cur.data(), cur.size(), false };
|
||||||
|
@ -265,7 +265,7 @@ class Model:
|
|||||||
break
|
break
|
||||||
|
|
||||||
for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
|
for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)):
|
||||||
data: np.ndarray = data # type hint
|
data: np.ndarray # type hint
|
||||||
n_dims = len(data.shape)
|
n_dims = len(data.shape)
|
||||||
data_dtype = data.dtype
|
data_dtype = data.dtype
|
||||||
data_qtype: gguf.GGMLQuantizationType | None = None
|
data_qtype: gguf.GGMLQuantizationType | None = None
|
||||||
@ -602,10 +602,6 @@ class Model:
|
|||||||
|
|
||||||
tokenizer_path = self.dir_model / 'tokenizer.model'
|
tokenizer_path = self.dir_model / 'tokenizer.model'
|
||||||
|
|
||||||
tokens: list[bytes] = []
|
|
||||||
scores: list[float] = []
|
|
||||||
toktypes: list[int] = []
|
|
||||||
|
|
||||||
if not tokenizer_path.is_file():
|
if not tokenizer_path.is_file():
|
||||||
raise FileNotFoundError(f"File not found: {tokenizer_path}")
|
raise FileNotFoundError(f"File not found: {tokenizer_path}")
|
||||||
|
|
||||||
@ -1363,7 +1359,7 @@ class LlamaModel(Model):
|
|||||||
|
|
||||||
def set_vocab(self):
|
def set_vocab(self):
|
||||||
try:
|
try:
|
||||||
self. _set_vocab_sentencepiece()
|
self._set_vocab_sentencepiece()
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
try:
|
try:
|
||||||
self._set_vocab_llama_hf()
|
self._set_vocab_llama_hf()
|
||||||
@ -2127,7 +2123,7 @@ class InternLM2Model(Model):
|
|||||||
logger.error(f'Error: Missing {tokenizer_path}')
|
logger.error(f'Error: Missing {tokenizer_path}')
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
sentencepiece_model = model.ModelProto()
|
sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue]
|
||||||
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
|
sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
|
||||||
add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
|
add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix
|
||||||
|
|
||||||
@ -2155,6 +2151,9 @@ class InternLM2Model(Model):
|
|||||||
toktype = SentencePieceTokenTypes.UNUSED
|
toktype = SentencePieceTokenTypes.UNUSED
|
||||||
elif tokenizer.IsByte(token_id):
|
elif tokenizer.IsByte(token_id):
|
||||||
toktype = SentencePieceTokenTypes.BYTE
|
toktype = SentencePieceTokenTypes.BYTE
|
||||||
|
# take care of ununsed raw token
|
||||||
|
if piece.startswith('[UNUSED'):
|
||||||
|
toktype = SentencePieceTokenTypes.UNKNOWN
|
||||||
|
|
||||||
tokens.append(text)
|
tokens.append(text)
|
||||||
scores.append(score)
|
scores.append(score)
|
||||||
@ -2170,6 +2169,47 @@ class InternLM2Model(Model):
|
|||||||
scores.append(-1000.0)
|
scores.append(-1000.0)
|
||||||
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
|
toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
|
||||||
|
|
||||||
|
chat_eos_token = '<|im_end|>'
|
||||||
|
chat_eos_token_id = None
|
||||||
|
|
||||||
|
tokenizer_config_file = self.dir_model / 'tokenizer_config.json'
|
||||||
|
if tokenizer_config_file.is_file():
|
||||||
|
with open(tokenizer_config_file, "r", encoding="utf-8") as f:
|
||||||
|
tokenizer_config_json = json.load(f)
|
||||||
|
added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {})
|
||||||
|
for token_id, foken_data in added_tokens_decoder.items():
|
||||||
|
token_id = int(token_id)
|
||||||
|
token = foken_data["content"]
|
||||||
|
if token == chat_eos_token:
|
||||||
|
chat_eos_token_id = token_id
|
||||||
|
token = token.encode("utf-8")
|
||||||
|
if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
||||||
|
assert(tokens[token_id] == token)
|
||||||
|
tokens[token_id] = token
|
||||||
|
scores[token_id] = -1000.0
|
||||||
|
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
|
||||||
|
if foken_data.get("special"):
|
||||||
|
toktypes[token_id] = SentencePieceTokenTypes.CONTROL
|
||||||
|
|
||||||
|
tokenizer_file = self.dir_model / 'tokenizer.json'
|
||||||
|
if tokenizer_file.is_file():
|
||||||
|
with open(tokenizer_file, "r", encoding="utf-8") as f:
|
||||||
|
tokenizer_json = json.load(f)
|
||||||
|
added_tokens = tokenizer_json.get("added_tokens", [])
|
||||||
|
for foken_data in added_tokens:
|
||||||
|
token_id = int(foken_data["id"])
|
||||||
|
token = foken_data["content"]
|
||||||
|
if token == chat_eos_token:
|
||||||
|
chat_eos_token_id = token_id
|
||||||
|
token = token.encode("utf-8")
|
||||||
|
if toktypes[token_id] != SentencePieceTokenTypes.UNKNOWN:
|
||||||
|
assert(tokens[token_id] == token)
|
||||||
|
tokens[token_id] = token
|
||||||
|
scores[token_id] = -1000.0
|
||||||
|
toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
|
||||||
|
if foken_data.get("special"):
|
||||||
|
toktypes[token_id] = SentencePieceTokenTypes.CONTROL
|
||||||
|
|
||||||
self.gguf_writer.add_tokenizer_model("llama")
|
self.gguf_writer.add_tokenizer_model("llama")
|
||||||
self.gguf_writer.add_tokenizer_pre("default")
|
self.gguf_writer.add_tokenizer_pre("default")
|
||||||
self.gguf_writer.add_token_list(tokens)
|
self.gguf_writer.add_token_list(tokens)
|
||||||
@ -2179,28 +2219,16 @@ class InternLM2Model(Model):
|
|||||||
|
|
||||||
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
|
special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens))
|
||||||
old_eos = special_vocab.special_token_ids["eos"]
|
old_eos = special_vocab.special_token_ids["eos"]
|
||||||
if "chat" in os.path.basename(self.dir_model.absolute()):
|
if chat_eos_token_id is not None:
|
||||||
# For the chat model, we replace the eos with '<|im_end|>'.
|
# For the chat model, we replace the eos with '<|im_end|>'.
|
||||||
# TODO: this is a hack, should be fixed
|
# TODO: this is a hack, should be fixed
|
||||||
# https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
|
# https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048
|
||||||
special_vocab.special_token_ids["eos"] = self._try_get_sft_eos(tokenizer)
|
special_vocab.special_token_ids["eos"] = chat_eos_token_id
|
||||||
logger.warning(f"Replace eos:{old_eos} with a special token:{special_vocab.special_token_ids['eos']} \
|
logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}"
|
||||||
in chat mode so that the conversation can end normally.")
|
" in chat mode so that the conversation can end normally.")
|
||||||
|
|
||||||
special_vocab.add_to_gguf(self.gguf_writer)
|
special_vocab.add_to_gguf(self.gguf_writer)
|
||||||
|
|
||||||
def _try_get_sft_eos(self, tokenizer):
|
|
||||||
unused_145_list = tokenizer.Encode('[UNUSED_TOKEN_145]')
|
|
||||||
im_end_list = tokenizer.Encode('<|im_end|>')
|
|
||||||
eos_token = None
|
|
||||||
assert (len(unused_145_list) == 1) ^ (len(im_end_list) == 1)
|
|
||||||
if len(unused_145_list) == 1:
|
|
||||||
eos_token = unused_145_list[0]
|
|
||||||
if len(im_end_list) == 1:
|
|
||||||
eos_token = im_end_list[0]
|
|
||||||
assert eos_token
|
|
||||||
return eos_token
|
|
||||||
|
|
||||||
def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int):
|
def _hf_permute_qk(self, weights, n_head: int, n_head_kv: int):
|
||||||
if n_head_kv is not None and n_head != n_head_kv:
|
if n_head_kv is not None and n_head != n_head_kv:
|
||||||
n_head = n_head_kv
|
n_head = n_head_kv
|
||||||
@@ -2219,6 +2247,10 @@ in chat mode so that the conversation can end normally.")
         self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"])
         self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"])
         self.gguf_writer.add_file_type(self.ftype)
+        if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]:
+            if self.hparams["rope_scaling"].get("type") == "linear":
+                self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR)
+                self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"])
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
         num_heads = self.hparams["num_attention_heads"]
@@ -2979,7 +3011,7 @@ class T5Model(Model):
         if not tokenizer_path.is_file():
             raise FileNotFoundError(f"File not found: {tokenizer_path}")
 
-        sentencepiece_model = model.ModelProto()
+        sentencepiece_model = model.ModelProto()  # pyright: ignore[reportAttributeAccessIssue]
         sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read())
 
         # some models like Pile-T5 family use BPE tokenizer instead of Unigram
@@ -3159,7 +3191,7 @@ class JaisModel(Model):
             # but Jais's PyTorch model simply precalculates the slope values and places them
             # in relative_pes.slopes
             n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"]))
-            first_val = float(data_torch._data[0])
+            first_val = float(data_torch[0].item())
             self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2)
 
         return tensors
@@ -3193,7 +3225,7 @@ class ChatGLMModel(Model):
     def set_vocab_chatglm3(self):
        dir_model = self.dir_model
        hparams = self.hparams
-       tokens: list[bytearray] = []
+       tokens: list[bytes] = []
        toktypes: list[int] = []
        scores: list[float] = []
 
@@ -3342,7 +3374,7 @@ class ChatGLMModel(Model):
         special_vocab.add_to_gguf(self.gguf_writer)
 
     def set_gguf_parameters(self):
-        self.gguf_writer.add_name(self.hparams.get("_name_or_path").split("/")[1]) # THUDM/glm4-9b-chat or THUDM/chatglm3-6b
+        self.gguf_writer.add_name(self.hparams["_name_or_path"].split("/")[1]) # THUDM/glm4-9b-chat or THUDM/chatglm3-6b
         n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed"))
         n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads"))
         n_head_kv = self.hparams.get("multi_query_group_num", n_head)
@@ -354,7 +354,8 @@ class GGMLToGGUF:
 
 
 def handle_metadata(cfg, hp):
-    import convert
+    import examples.convert_legacy_llama as convert
+
     assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory'
     hf_config_path   = cfg.model_metadata_dir / "config.json"
     orig_config_path = cfg.model_metadata_dir / "params.json"
@@ -28,6 +28,7 @@ In order to build llama.cpp you have four different options.
     ```
 
   - Notes:
+    - For `Q4_0_4_4` quantization type build, add the `GGML_NO_LLAMAFILE=1` flag. For example, use `make GGML_NO_LLAMAFILE=1`.
    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
    - For faster repeated compilation, install [ccache](https://ccache.dev/).
    - For debug builds, run `make LLAMA_DEBUG=1`
@@ -41,6 +42,7 @@ In order to build llama.cpp you have four different options.
 
   **Notes**:
 
+    - For `Q4_0_4_4` quantization type build, add the `-DGGML_LLAMAFILE=OFF` cmake option. For example, use `cmake -B build -DGGML_LLAMAFILE=OFF`.
    - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
    - For faster repeated compilation, install [ccache](https://ccache.dev/).
    - For debug builds, there are two cases:
@@ -353,7 +353,7 @@ class Metadata:
     version: Optional[str] = None
     url: Optional[str] = None
     description: Optional[str] = None
-    licence: Optional[str] = None
+    license: Optional[str] = None
     source_url: Optional[str] = None
     source_hf_repo: Optional[str] = None
 
@@ -492,12 +492,13 @@ class LazyTensor:
 
 LazyModel: TypeAlias = 'dict[str, LazyTensor]'
 
+ModelFormat: TypeAlias = Literal['ggml', 'torch', 'safetensors', 'none']
 
 @dataclass
 class ModelPlus:
     model: LazyModel
     paths: list[Path]  # Where this was read from.
-    format: Literal['ggml', 'torch', 'safetensors', 'none']
+    format: ModelFormat
     vocab: BaseVocab | None  # For GGML models (which have vocab built in), the vocab.
 
 
@@ -536,7 +537,7 @@ def merge_sharded(models: list[LazyModel]) -> LazyModel:
 
 
 def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:
-    formats = set(mp.format for mp in models_plus)
+    formats: set[ModelFormat] = set(mp.format for mp in models_plus)
     assert len(formats) == 1, "different formats?"
     format = formats.pop()
     paths = [path for mp in models_plus for path in mp.paths]
@@ -555,7 +556,7 @@ def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus:
     else:
         model = merge_sharded([mp.model for mp in models_plus])
 
-    return ModelPlus(model, paths, format, vocab)  # pytype: disable=wrong-arg-types
+    return ModelPlus(model, paths, format, vocab)
 
 
 def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor:
@@ -805,7 +806,7 @@ class OutputFile:
     def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE):
         self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess)
 
-    def add_meta_model(self, params: Params, metadata: Metadata) -> None:
+    def add_meta_model(self, params: Params, metadata: Metadata | None) -> None:
         # Metadata About The Model And Its Provenence
         name = "LLaMA"
         if metadata is not None and metadata.name is not None:
@@ -827,8 +828,8 @@ class OutputFile:
             self.gguf.add_url(metadata.url)
         if metadata.description is not None:
             self.gguf.add_description(metadata.description)
-        if metadata.licence is not None:
-            self.gguf.add_licence(metadata.licence)
+        if metadata.license is not None:
+            self.gguf.add_licence(metadata.license)
         if metadata.source_url is not None:
             self.gguf.add_source_url(metadata.source_url)
         if metadata.source_hf_repo is not None:
@@ -943,7 +944,7 @@ class OutputFile:
     @staticmethod
     def write_vocab_only(
         fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab,
-        endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: Metadata = None,
+        endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: Metadata | None = None,
     ) -> None:
         check_vocab_size(params, vocab, pad_vocab=pad_vocab)
 
@@ -977,7 +978,7 @@ class OutputFile:
         fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab,
         concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE,
         pad_vocab: bool = False,
-        metadata: Metadata = None,
+        metadata: Metadata | None = None,
     ) -> None:
         check_vocab_size(params, vocab, pad_vocab=pad_vocab)
 
@@ -1396,6 +1397,8 @@ def main(args_in: list[str] | None = None) -> None:
     if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab:
         vocab = model_plus.vocab
 
+    assert params is not None
+
     logger.info(f"Vocab info: {vocab}")
     logger.info(f"Special vocab info: {special_vocab}")
     model = model_plus.model

examples/deprecation-warning/README.md (new file, 51 lines)

@@ -0,0 +1,51 @@
+# Migration notice for binary filenames
+
+> [!IMPORTANT]
+[2024 Jun 12] Binaries have been renamed w/ a `llama-` prefix. `main` is now `llama-cli`, `server` is `llama-server`, etc (https://github.com/ggerganov/llama.cpp/pull/7809)
+
+This migration was important, but it is a breaking change that may not always be immediately obvious to users.
+
+Please update all scripts and workflows to use the new binary names.
+
+| Old Filename | New Filename |
+| ---- | ---- |
+| main | llama-cli |
+| server | llama-server |
+| llama-bench | llama-bench |
+| embedding | llama-embedding |
+| finetune | llama-finetune |
+| quantize | llama-quantize |
+| tokenize | llama-tokenize |
+| export-lora | llama-export-lora |
+| libllava.a | libllava.a |
+| baby-llama | llama-baby-llama |
+| batched | llama-batched |
+| batched-bench | llama-batched-bench |
+| benchmark-matmult | llama-benchmark-matmult |
+| convert-llama2c-to-ggml | llama-convert-llama2c-to-ggml |
+| eval-callback | llama-eval-callback |
+| gbnf-validator | llama-gbnf-validator |
+| gguf | llama-gguf |
+| gguf-split | llama-gguf-split |
+| gritlm | llama-gritlm |
+| imatrix | llama-imatrix |
+| infill | llama-infill |
+| llava-cli | llama-llava-cli |
+| lookahead | llama-lookahead |
+| lookup | llama-lookup |
+| lookup-create | llama-lookup-create |
+| lookup-merge | llama-lookup-merge |
+| lookup-stats | llama-lookup-stats |
+| parallel | llama-parallel |
+| passkey | llama-passkey |
+| perplexity | llama-perplexity |
+| q8dot | llama-q8dot |
+| quantize-stats | llama-quantize-stats |
+| retrieval | llama-retrieval |
+| save-load-state | llama-save-load-state |
+| simple | llama-simple |
+| speculative | llama-speculative |
+| train-text-from-scratch | llama-train-text-from-scratch |
+| vdot | llama-vdot |
+| tests/test-c.o | tests/test-c.o |

examples/deprecation-warning/deprecation-warning.cpp (new file, 35 lines)

@@ -0,0 +1,35 @@
+// Warns users that this filename was deprecated, and provides a link for more information.
+
+#include <cstdio>
+#include <string>
+#include <unordered_map>
+
+// Main
+int main(int argc, char** argv) {
+    std::string filename = "main";
+    if (argc >= 1) {
+        filename = argv[0];
+    }
+
+    // Get only the program name from the full path
+    auto pos = filename.find_last_of('/');
+    if (pos != std::string::npos) {
+        filename = filename.substr(pos+1);
+    }
+
+    // Append "llama-" to the beginning of filename to get the replacemnt filename
+    auto replacement_filename = "llama-" + filename;
+
+    // The exception is if the filename is "main", then our replacement filename is "llama-cli"
+    if (filename == "main") {
+        replacement_filename = "llama-cli";
+    }
+
+    fprintf(stdout, "\n");
+    fprintf(stdout, "WARNING: The binary '%s' is deprecated.\n", filename.c_str());
+    fprintf(stdout, " Please use '%s' instead.\n", replacement_filename.c_str());
+    fprintf(stdout, " See https://github.com/ggerganov/llama.cpp/tree/master/examples/deprecation-warning/README.md for more information.\n");
+    fprintf(stdout, "\n");
+
+    return EXIT_FAILURE;
+}
@ -74,7 +74,7 @@ class Tensor:
|
|||||||
if len(self.ne) == 0:
|
if len(self.ne) == 0:
|
||||||
self.nbytes = 0
|
self.nbytes = 0
|
||||||
else:
|
else:
|
||||||
self.nbytes = int(np.product(self.ne)) * 4
|
self.nbytes = int(np.prod(self.ne)) * 4
|
||||||
else:
|
else:
|
||||||
raise ValueError(f"Unhandled data type '{self.dtype}'")
|
raise ValueError(f"Unhandled data type '{self.dtype}'")
|
||||||
|
|
||||||
|
@ -204,21 +204,17 @@ int main(int argc, char ** argv) {
|
|||||||
GGML_ASSERT(llama_add_eos_token(model) != 1);
|
GGML_ASSERT(llama_add_eos_token(model) != 1);
|
||||||
LOG("add_bos: %d\n", add_bos);
|
LOG("add_bos: %d\n", add_bos);
|
||||||
|
|
||||||
bool suff_rm_leading_spc = params.escape;
|
|
||||||
if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
|
|
||||||
params.input_suffix.erase(0, 1);
|
|
||||||
suff_rm_leading_spc = false;
|
|
||||||
}
|
|
||||||
std::vector<llama_token> embd_inp;
|
std::vector<llama_token> embd_inp;
|
||||||
std::vector<llama_token> embd_end;
|
std::vector<llama_token> embd_end;
|
||||||
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
||||||
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
||||||
const int space_token = 29871;
|
|
||||||
if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
|
GGML_ASSERT(llama_token_prefix(model) >= 0);
|
||||||
inp_sfx.erase(inp_sfx.begin());
|
GGML_ASSERT(llama_token_suffix(model) >= 0);
|
||||||
}
|
|
||||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||||
|
|
||||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||||
if (add_bos) {
|
if (add_bos) {
|
||||||
@ -516,19 +512,14 @@ int main(int argc, char ** argv) {
|
|||||||
string_process_escapes(params.input_prefix);
|
string_process_escapes(params.input_prefix);
|
||||||
string_process_escapes(params.input_suffix);
|
string_process_escapes(params.input_suffix);
|
||||||
}
|
}
|
||||||
suff_rm_leading_spc = params.escape;
|
|
||||||
if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
|
|
||||||
params.input_suffix.erase(0, 1);
|
|
||||||
suff_rm_leading_spc = false;
|
|
||||||
}
|
|
||||||
// tokenize new prefix and suffix
|
// tokenize new prefix and suffix
|
||||||
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
||||||
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
||||||
if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
|
|
||||||
inp_sfx.erase(inp_sfx.begin());
|
|
||||||
}
|
|
||||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||||
|
|
||||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||||
if (add_bos) {
|
if (add_bos) {
|
||||||
|
@ -3,7 +3,7 @@
|
|||||||
#! pip install pydantic
|
#! pip install pydantic
|
||||||
#! python json_schema_pydantic_example.py
|
#! python json_schema_pydantic_example.py
|
||||||
|
|
||||||
from pydantic import BaseModel, Extra, TypeAdapter
|
from pydantic import BaseModel, Field, TypeAdapter
|
||||||
from annotated_types import MinLen
|
from annotated_types import MinLen
|
||||||
from typing import Annotated, List, Optional
|
from typing import Annotated, List, Optional
|
||||||
import json, requests
|
import json, requests
|
||||||
@ -17,6 +17,9 @@ if True:
|
|||||||
|
|
||||||
The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below)
|
The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below)
|
||||||
'''
|
'''
|
||||||
|
response_format = None
|
||||||
|
type_adapter = None
|
||||||
|
|
||||||
if response_model:
|
if response_model:
|
||||||
type_adapter = TypeAdapter(response_model)
|
type_adapter = TypeAdapter(response_model)
|
||||||
schema = type_adapter.json_schema()
|
schema = type_adapter.json_schema()
|
||||||
|
@ -1,4 +1,6 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
import argparse
|
import argparse
|
||||||
import itertools
|
import itertools
|
||||||
import json
|
import json
|
||||||
@ -188,7 +190,7 @@ def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], ou
|
|||||||
raise RuntimeError("At least one of min_value or max_value must be set")
|
raise RuntimeError("At least one of min_value or max_value must be set")
|
||||||
|
|
||||||
class BuiltinRule:
|
class BuiltinRule:
|
||||||
def __init__(self, content: str, deps: list = None):
|
def __init__(self, content: str, deps: list | None = None):
|
||||||
self.content = content
|
self.content = content
|
||||||
self.deps = deps or []
|
self.deps = deps or []
|
||||||
|
|
||||||
@ -248,7 +250,7 @@ class SchemaConverter:
|
|||||||
|
|
||||||
def _format_literal(self, literal):
|
def _format_literal(self, literal):
|
||||||
escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub(
|
escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub(
|
||||||
lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)), literal
|
lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)) or m.group(0), literal
|
||||||
)
|
)
|
||||||
return f'"{escaped}"'
|
return f'"{escaped}"'
|
||||||
|
|
||||||
@ -403,11 +405,11 @@ class SchemaConverter:
|
|||||||
i = 0
|
i = 0
|
||||||
length = len(pattern)
|
length = len(pattern)
|
||||||
|
|
||||||
def to_rule(s: Tuple[str, bool]) -> str:
|
def to_rule(s: tuple[str, bool]) -> str:
|
||||||
(txt, is_literal) = s
|
(txt, is_literal) = s
|
||||||
return "\"" + txt + "\"" if is_literal else txt
|
return "\"" + txt + "\"" if is_literal else txt
|
||||||
|
|
||||||
def transform() -> Tuple[str, bool]:
|
def transform() -> tuple[str, bool]:
|
||||||
'''
|
'''
|
||||||
Parse a unit at index i (advancing it), and return its string representation + whether it's a literal.
|
Parse a unit at index i (advancing it), and return its string representation + whether it's a literal.
|
||||||
'''
|
'''
|
||||||
@ -420,7 +422,7 @@ class SchemaConverter:
|
|||||||
# We only need a flat structure here to apply repetition operators to the last item, and
|
# We only need a flat structure here to apply repetition operators to the last item, and
|
||||||
# to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially
|
# to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially
|
||||||
# (GBNF's syntax is luckily very close to regular expressions!)
|
# (GBNF's syntax is luckily very close to regular expressions!)
|
||||||
seq: list[Tuple[str, bool]] = []
|
seq: list[tuple[str, bool]] = []
|
||||||
|
|
||||||
def get_dot():
|
def get_dot():
|
||||||
if self._dotall:
|
if self._dotall:
|
||||||
|
@ -185,6 +185,8 @@ else:
|
|||||||
fout.add_description("two-tower CLIP model")
|
fout.add_description("two-tower CLIP model")
|
||||||
|
|
||||||
if has_text_encoder:
|
if has_text_encoder:
|
||||||
|
assert t_hparams is not None
|
||||||
|
assert tokens is not None
|
||||||
# text_model hparams
|
# text_model hparams
|
||||||
fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
|
fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
|
||||||
fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
|
fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
|
||||||
@ -259,8 +261,8 @@ if has_vision_encoder:
|
|||||||
|
|
||||||
|
|
||||||
if processor is not None:
|
if processor is not None:
|
||||||
image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean
|
image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean # pyright: ignore[reportAttributeAccessIssue]
|
||||||
image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std
|
image_std = processor.image_processor.image_std if args.image_std is None or args.image_std == default_image_std else args.image_std # pyright: ignore[reportAttributeAccessIssue]
|
||||||
else:
|
else:
|
||||||
image_mean = args.image_mean if args.image_mean is not None else default_image_mean
|
image_mean = args.image_mean if args.image_mean is not None else default_image_mean
|
||||||
image_std = args.image_std if args.image_std is not None else default_image_std
|
image_std = args.image_std if args.image_std is not None else default_image_std
|
||||||
@ -272,7 +274,7 @@ fout.add_bool("clip.use_gelu", use_gelu)
|
|||||||
|
|
||||||
|
|
||||||
if has_llava_projector:
|
if has_llava_projector:
|
||||||
model.vision_model.encoder.layers.pop(-1)
|
model.vision_model.encoder.layers.pop(-1) # pyright: ignore[reportAttributeAccessIssue]
|
||||||
projector = torch.load(args.llava_projector)
|
projector = torch.load(args.llava_projector)
|
||||||
for name, data in projector.items():
|
for name, data in projector.items():
|
||||||
name = get_tensor_name(name)
|
name = get_tensor_name(name)
|
||||||
@ -286,7 +288,7 @@ if has_llava_projector:
|
|||||||
|
|
||||||
print("Projector tensors added\n")
|
print("Projector tensors added\n")
|
||||||
|
|
||||||
state_dict = model.state_dict()
|
state_dict = model.state_dict() # pyright: ignore[reportAttributeAccessIssue]
|
||||||
for name, data in state_dict.items():
|
for name, data in state_dict.items():
|
||||||
if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_llava_projector):
|
if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_llava_projector):
|
||||||
# we don't need this
|
# we don't need this
|
||||||
|
@ -2,7 +2,9 @@ import argparse
 import glob
 import os
 import torch
-from safetensors.torch import load as safe_load, save as safe_save, safe_open, save_file
+from safetensors import safe_open
+from safetensors.torch import save_file
+from typing import Any, ContextManager, cast

 # Function to determine if file is a SafeTensor file
 def is_safetensor_file(file_path):
@ -13,7 +15,7 @@ def is_safetensor_file(file_path):
 def load_model(file_path):
     if is_safetensor_file(file_path):
         tensors = {}
-        with safe_open(file_path, framework="pt", device="cpu") as f:
+        with cast(ContextManager[Any], safe_open(file_path, framework="pt", device="cpu")) as f:
             for key in f.keys():
                 tensors[key] = f.get_tensor(key).clone()
                 # output shape
@ -134,7 +136,7 @@ if len(mm_tensors) == 0:
     if last_checkpoint is not None:
         for k, v in last_checkpoint.items():
             print(k)
-    print(f"Found {len(mm_tensors)} tensors to extract out of {len(last_checkpoint)} tensors.")
+    print(f"Found {len(mm_tensors)} tensors to extract out of {len(last_checkpoint) if last_checkpoint is not None else 0} tensors.")
     print("No tensors found. Is this a LLaVA model?")
     exit()

@ -143,8 +145,10 @@ print(f"Found additional {len(first_mm_tensors)} tensors to extract.")
 # projector = {name: checkpoint.[name].float() for name in mm_tensors}
 projector = {}
 for name in mm_tensors:
+    assert last_checkpoint is not None
     projector[name] = last_checkpoint[name].float()
 for name in first_mm_tensors:
+    assert first_checkpoint is not None
     projector[name] = first_checkpoint[name].float()

 if len(projector) > 0:
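The hunk above replaces the monolithic `safetensors.torch` import with `safetensors.safe_open` plus an explicit `cast`, purely to keep pyright happy about using the handle as a context manager. A minimal sketch of the resulting pattern (the helper name and file path below are illustrative, not part of the patch):

```python
# Sketch of the new import/usage layout; runtime behaviour of safe_open() is unchanged,
# the cast only quiets the type checker.
from typing import Any, ContextManager, cast

from safetensors import safe_open


def load_safetensor_tensors(file_path: str) -> dict:
    tensors = {}
    # safe_open() returns an opaque handle; casting it to ContextManager[Any]
    # lets static checkers accept the `with` statement.
    with cast(ContextManager[Any], safe_open(file_path, framework="pt", device="cpu")) as f:
        for key in f.keys():
            tensors[key] = f.get_tensor(key).clone()
    return tensors
```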
|
@ -6,10 +6,10 @@ import re
 from copy import copy
 from enum import Enum
 from inspect import getdoc, isclass
-from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union, get_args, get_origin, get_type_hints
+from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union, get_args, get_origin

 from docstring_parser import parse
-from pydantic import BaseModel, Field, create_model
+from pydantic import BaseModel, create_model

 if TYPE_CHECKING:
     from types import GenericAlias
@ -17,6 +17,9 @@ else:
     # python 3.8 compat
     from typing import _GenericAlias as GenericAlias

+# TODO: fix this
+# pyright: reportAttributeAccessIssue=information


 class PydanticDataType(Enum):
     """
@ -234,8 +237,9 @@ def generate_gbnf_float_rules(max_digit=None, min_digit=None, max_precision=None

     # Define the integer part rule
     integer_part_rule = (
-        "integer-part" + (f"-max{max_digit}" if max_digit is not None else "") + (
-            f"-min{min_digit}" if min_digit is not None else "")
+        "integer-part"
+        + (f"-max{max_digit}" if max_digit is not None else "")
+        + (f"-min{min_digit}" if min_digit is not None else "")
     )

     # Define the fractional part rule based on precision constraints
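The reformatted expression only changes line breaks; the generated GBNF rule names stay the same. A tiny illustrative check (values chosen here for demonstration, not taken from the patch):

```python
# Builds the same rule-name strings as the reformatted expression above.
def integer_part_rule_name(max_digit=None, min_digit=None) -> str:
    return (
        "integer-part"
        + (f"-max{max_digit}" if max_digit is not None else "")
        + (f"-min{min_digit}" if min_digit is not None else "")
    )

print(integer_part_rule_name())      # integer-part
print(integer_part_rule_name(5, 2))  # integer-part-max5-min2
```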
@ -458,7 +462,7 @@ def generate_gbnf_grammar(model: type[BaseModel], processed_models: set[type[BaseModel]], ...):
     if not issubclass(model, BaseModel):
         # For non-Pydantic classes, generate model_fields from __annotations__ or __init__
         if hasattr(model, "__annotations__") and model.__annotations__:
-            model_fields = {name: (typ, ...) for name, typ in model.__annotations__.items()}
+            model_fields = {name: (typ, ...) for name, typ in model.__annotations__.items()}  # pyright: ignore[reportGeneralTypeIssues]
         else:
             init_signature = inspect.signature(model.__init__)
             parameters = init_signature.parameters
@ -680,7 +684,7 @@ def generate_markdown_documentation(
         str: Generated text documentation.
     """
     documentation = ""
-    pyd_models = [(model, True) for model in pydantic_models]
+    pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models]
     for model, add_prefix in pyd_models:
         if add_prefix:
             documentation += f"{model_prefix}: {model.__name__}\n"
@ -700,7 +704,7 @@ def generate_markdown_documentation(
             # Indenting the fields section
             documentation += f"  {fields_prefix}:\n"
         else:
-            documentation += f"  Fields:\n"
+            documentation += f"  Fields:\n"  # noqa: F541
         if isclass(model) and issubclass(model, BaseModel):
             for name, field_type in model.__annotations__.items():
                 # if name == "markdown_code_block":
@ -778,7 +782,7 @@ def generate_field_markdown(
         return field_text

     if field_description != "":
-        field_text += f" Description: " + field_description + "\n"
+        field_text += f" Description: {field_description}\n"

     # Check for and include field-specific examples if available
     if hasattr(model, "Config") and hasattr(model.Config,
@ -833,7 +837,7 @@ def generate_text_documentation(
         str: Generated text documentation.
     """
     documentation = ""
-    pyd_models = [(model, True) for model in pydantic_models]
+    pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models]
     for model, add_prefix in pyd_models:
         if add_prefix:
             documentation += f"{model_prefix}: {model.__name__}\n"
@ -1164,7 +1168,7 @@ def create_dynamic_model_from_function(func: Callable[..., Any]):
         dynamic_fields[param.name] = (
             param.annotation if param.annotation != inspect.Parameter.empty else str, default_value)
     # Creating the dynamic model
-    dynamic_model = create_model(f"{func.__name__}", **dynamic_fields)  # type: ignore[call-overload]
+    dynamic_model = create_model(f"{func.__name__}", **dynamic_fields)

    for name, param_doc in param_docs:
        dynamic_model.model_fields[name].description = param_doc.description
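For context, this hunk only drops a now-unneeded `type: ignore` on the `create_model` call that turns a plain function signature into a pydantic model. A small sketch of that pattern (pydantic v2 assumed; the example function is invented for illustration):

```python
# Build a pydantic model whose fields mirror a function's parameters.
import inspect
from pydantic import create_model


def add(a: int, b: int = 1) -> int:
    """Add two numbers."""
    return a + b


dynamic_fields = {}
for param in inspect.signature(add).parameters.values():
    annotation = param.annotation if param.annotation != inspect.Parameter.empty else str
    default = param.default if param.default != inspect.Parameter.empty else ...
    dynamic_fields[param.name] = (annotation, default)

AddModel = create_model("add", **dynamic_fields)
print(AddModel(a=2).model_dump())  # {'a': 2, 'b': 1}
```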
@ -1228,9 +1232,6 @@ def map_grammar_names_to_pydantic_model_class(pydantic_model_list):
     return output


-from enum import Enum
-
-
 def json_schema_to_python_types(schema):
     type_map = {
         "any": Any,
@ -1275,7 +1276,7 @@ def convert_dictionary_to_pydantic_model(dictionary: dict[str, Any], model_name:
             if items != {}:
                 array = {"properties": items}
                 array_type = convert_dictionary_to_pydantic_model(array, f"{model_name}_{field_name}_items")
-                fields[field_name] = (List[array_type], ...)  # type: ignore[valid-type]
+                fields[field_name] = (List[array_type], ...)
             else:
                 fields[field_name] = (list, ...)
         elif field_type == "object":
@ -1285,7 +1286,8 @@ def convert_dictionary_to_pydantic_model(dictionary: dict[str, Any], model_name:
             required = field_data.get("enum", [])
             for key, field in fields.items():
                 if key not in required:
-                    fields[key] = (Optional[fields[key][0]], ...)
+                    optional_type = fields[key][0]
+                    fields[key] = (Optional[optional_type], ...)
         else:
             field_type = json_schema_to_python_types(field_type)
             fields[field_name] = (field_type, ...)
@ -1305,6 +1307,7 @@ def convert_dictionary_to_pydantic_model(dictionary: dict[str, Any], model_name:
     required = dictionary.get("required", [])
     for key, field in fields.items():
         if key not in required:
-            fields[key] = (Optional[fields[key][0]], ...)
+            optional_type = fields[key][0]
+            fields[key] = (Optional[optional_type], ...)
     custom_model = create_model(model_name, **fields)
     return custom_model

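The two-step `optional_type` assignment above is behaviour-preserving; it only gives the type checker a concrete intermediate to follow. A self-contained sketch of the same Optional-field handling (field names are invented):

```python
# Non-required fields become Optional before the model is created;
# the result is identical to the pre-patch one-liner.
from typing import Any, Optional
from pydantic import create_model

fields: dict[str, Any] = {"name": (str, ...), "age": (int, ...)}
required = ["name"]

for key in list(fields):
    if key not in required:
        optional_type = fields[key][0]
        fields[key] = (Optional[optional_type], ...)

Person = create_model("Person", **fields)
print(Person(name="Ada", age=None))  # age is now Optional[int]
```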
@ -1,6 +1,7 @@
 # Function calling example using pydantic models.
+from __future__ import annotations

 import datetime
-import importlib
 import json
 from enum import Enum
 from typing import Optional, Union
@ -215,9 +216,9 @@ for call in json_data:
     if call["function"] == "Calculator":
         print(Calculator(**call["params"]).run())
     elif call["function"] == "get_current_datetime":
-        print(current_datetime_model(**call["params"]).run())
+        print(current_datetime_model(**call["params"]).run())  # pyright: ignore[reportAttributeAccessIssue]
     elif call["function"] == "get_current_weather":
-        print(current_weather_tool_model(**call["params"]).run())
+        print(current_weather_tool_model(**call["params"]).run())  # pyright: ignore[reportAttributeAccessIssue]
 # Should output something like this:
 # 2024-01-14 13:36:06
 # {"location": "London", "temperature": "42", "unit": "celsius"}
|
@ -46,6 +46,9 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "Q5_K_M",   LLAMA_FTYPE_MOSTLY_Q5_K_M,   " 5.33G, +0.0569 ppl @ Llama-3-8B", },
     { "Q6_K",     LLAMA_FTYPE_MOSTLY_Q6_K,     " 6.14G, +0.0217 ppl @ Llama-3-8B", },
     { "Q8_0",     LLAMA_FTYPE_MOSTLY_Q8_0,     " 7.96G, +0.0026 ppl @ Llama-3-8B", },
+    { "Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
+    { "Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
+    { "Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
     { "F16",      LLAMA_FTYPE_MOSTLY_F16,      "14.00G, +0.0020 ppl @ Mistral-7B", },
     { "BF16",     LLAMA_FTYPE_MOSTLY_BF16,     "14.00G, -0.0050 ppl @ Mistral-7B", },
     { "F32",      LLAMA_FTYPE_ALL_F32,         "26.00G @ 7B", },
|
@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import argparse
 import json
 import os
@ -59,10 +61,11 @@ def main(args_in: list[str] | None = None) -> None:
         sys.exit(1)

     # start the benchmark
+    iterations = 0
+    data = {}
     try:
         start_benchmark(args)

-        iterations = 0
         with open("results.github.env", 'w') as github_env:
             # parse output
             with open('k6-results.json', 'r') as bench_results:
|
|||||||
timestamps, metric_values = zip(*values)
|
timestamps, metric_values = zip(*values)
|
||||||
metric_values = [float(value) for value in metric_values]
|
metric_values = [float(value) for value in metric_values]
|
||||||
prometheus_metrics[metric] = metric_values
|
prometheus_metrics[metric] = metric_values
|
||||||
timestamps_dt = [datetime.fromtimestamp(int(ts)) for ts in timestamps]
|
timestamps_dt = [str(datetime.fromtimestamp(int(ts))) for ts in timestamps]
|
||||||
plt.figure(figsize=(16, 10), dpi=80)
|
plt.figure(figsize=(16, 10), dpi=80)
|
||||||
plt.plot(timestamps_dt, metric_values, label=metric)
|
plt.plot(timestamps_dt, metric_values, label=metric)
|
||||||
plt.xticks(rotation=0, fontsize=14, horizontalalignment='center', alpha=.7)
|
plt.xticks(rotation=0, fontsize=14, horizontalalignment='center', alpha=.7)
|
||||||
@ -156,7 +159,7 @@ def main(args_in: list[str] | None = None) -> None:
|
|||||||
plt.close()
|
plt.close()
|
||||||
|
|
||||||
# Mermaid format in case images upload failed
|
# Mermaid format in case images upload failed
|
||||||
with (open(f"{metric}.mermaid", 'w') as mermaid_f):
|
with open(f"{metric}.mermaid", 'w') as mermaid_f:
|
||||||
mermaid = (
|
mermaid = (
|
||||||
f"""---
|
f"""---
|
||||||
config:
|
config:
|
||||||
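Converting the timestamps to strings makes matplotlib treat them as categorical x labels rather than datetimes, which sidesteps the type complaint without changing the plot's shape. A standalone sketch of that plotting step (the sample values and metric name are invented):

```python
# Headless matplotlib plot with string x labels, mirroring the bench script's change.
from datetime import datetime
import matplotlib
matplotlib.use("Agg")  # no display needed, as in CI
import matplotlib.pyplot as plt

timestamps = [1720000000, 1720000010, 1720000020]
metric_values = [1.0, 2.5, 2.0]

timestamps_dt = [str(datetime.fromtimestamp(int(ts))) for ts in timestamps]
plt.figure(figsize=(16, 10), dpi=80)
plt.plot(timestamps_dt, metric_values, label="prompt_tokens_seconds")
plt.savefig("prompt_tokens_seconds.png")
plt.close()
```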
@ -278,7 +281,7 @@ def start_server_background(args):
     }
     server_process = subprocess.Popen(
         args,
-        **pkwargs)
+        **pkwargs)  # pyright: ignore[reportArgumentType, reportCallIssue]

     def server_log(in_stream, out_stream):
         for line in iter(in_stream.readline, b''):

@ -884,7 +884,8 @@ struct server_context {

     bool launch_slot_with_task(server_slot & slot, const server_task & task) {
         slot_params default_params;
-        llama_sampling_params default_sparams;
+        // Sampling parameter defaults are loaded from the global server context (but individual requests can still override them)
+        llama_sampling_params default_sparams = params.sparams;
         auto & data = task.data;

         if (data.count("__oaicompat") != 0) {
|
|||||||
import asyncio
|
import asyncio
|
||||||
import collections
|
|
||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
@ -8,19 +7,23 @@ import subprocess
|
|||||||
import sys
|
import sys
|
||||||
import threading
|
import threading
|
||||||
import time
|
import time
|
||||||
|
from collections.abc import Sequence
|
||||||
from contextlib import closing
|
from contextlib import closing
|
||||||
from re import RegexFlag
|
from re import RegexFlag
|
||||||
|
from typing import Any, Literal, cast
|
||||||
|
|
||||||
import aiohttp
|
import aiohttp
|
||||||
import numpy as np
|
import numpy as np
|
||||||
import openai
|
import openai
|
||||||
from behave import step
|
from openai.types.chat import ChatCompletionChunk
|
||||||
|
from behave import step # pyright: ignore[reportAttributeAccessIssue]
|
||||||
from behave.api.async_step import async_run_until_complete
|
from behave.api.async_step import async_run_until_complete
|
||||||
from prometheus_client import parser
|
from prometheus_client import parser
|
||||||
|
|
||||||
|
# pyright: reportRedeclaration=false
|
||||||
|
|
||||||
@step("a server listening on {server_fqdn}:{server_port}")
|
@step("a server listening on {server_fqdn}:{server_port}")
|
||||||
def step_server_config(context, server_fqdn, server_port):
|
def step_server_config(context, server_fqdn: str, server_port: str):
|
||||||
context.server_fqdn = server_fqdn
|
context.server_fqdn = server_fqdn
|
||||||
context.server_port = int(server_port)
|
context.server_port = int(server_port)
|
||||||
context.n_threads = None
|
context.n_threads = None
|
||||||
@ -74,34 +77,34 @@ def step_server_config(context, server_fqdn, server_port):
|
|||||||
|
|
||||||
|
|
||||||
@step('a model file {hf_file} from HF repo {hf_repo}')
|
@step('a model file {hf_file} from HF repo {hf_repo}')
|
||||||
def step_download_hf_model(context, hf_file, hf_repo):
|
def step_download_hf_model(context, hf_file: str, hf_repo: str):
|
||||||
context.model_hf_repo = hf_repo
|
context.model_hf_repo = hf_repo
|
||||||
context.model_hf_file = hf_file
|
context.model_hf_file = hf_file
|
||||||
context.model_file = os.path.basename(hf_file)
|
context.model_file = os.path.basename(hf_file)
|
||||||
|
|
||||||
|
|
||||||
@step('a model file {model_file}')
|
@step('a model file {model_file}')
|
||||||
def step_model_file(context, model_file):
|
def step_model_file(context, model_file: str):
|
||||||
context.model_file = model_file
|
context.model_file = model_file
|
||||||
|
|
||||||
|
|
||||||
@step('a model url {model_url}')
|
@step('a model url {model_url}')
|
||||||
def step_model_url(context, model_url):
|
def step_model_url(context, model_url: str):
|
||||||
context.model_url = model_url
|
context.model_url = model_url
|
||||||
|
|
||||||
|
|
||||||
@step('a model alias {model_alias}')
|
@step('a model alias {model_alias}')
|
||||||
def step_model_alias(context, model_alias):
|
def step_model_alias(context, model_alias: str):
|
||||||
context.model_alias = model_alias
|
context.model_alias = model_alias
|
||||||
|
|
||||||
|
|
||||||
@step('{seed:d} as server seed')
|
@step('{seed:d} as server seed')
|
||||||
def step_seed(context, seed):
|
def step_seed(context, seed: int):
|
||||||
context.server_seed = seed
|
context.server_seed = seed
|
||||||
|
|
||||||
|
|
||||||
@step('{ngl:d} GPU offloaded layers')
|
@step('{ngl:d} GPU offloaded layers')
|
||||||
def step_n_gpu_layer(context, ngl):
|
def step_n_gpu_layer(context, ngl: int):
|
||||||
if 'N_GPU_LAYERS' in os.environ:
|
if 'N_GPU_LAYERS' in os.environ:
|
||||||
new_ngl = int(os.environ['N_GPU_LAYERS'])
|
new_ngl = int(os.environ['N_GPU_LAYERS'])
|
||||||
if context.debug:
|
if context.debug:
|
||||||
@ -111,37 +114,37 @@ def step_n_gpu_layer(context, ngl):
|
|||||||
|
|
||||||
|
|
||||||
@step('{n_threads:d} threads')
|
@step('{n_threads:d} threads')
|
||||||
def step_n_threads(context, n_threads):
|
def step_n_threads(context, n_threads: int):
|
||||||
context.n_thread = n_threads
|
context.n_thread = n_threads
|
||||||
|
|
||||||
|
|
||||||
@step('{draft:d} as draft')
|
@step('{draft:d} as draft')
|
||||||
def step_draft(context, draft):
|
def step_draft(context, draft: int):
|
||||||
context.draft = draft
|
context.draft = draft
|
||||||
|
|
||||||
|
|
||||||
@step('{n_ctx:d} KV cache size')
|
@step('{n_ctx:d} KV cache size')
|
||||||
def step_n_ctx(context, n_ctx):
|
def step_n_ctx(context, n_ctx: int):
|
||||||
context.n_ctx = n_ctx
|
context.n_ctx = n_ctx
|
||||||
|
|
||||||
|
|
||||||
@step('{n_slots:d} slots')
|
@step('{n_slots:d} slots')
|
||||||
def step_n_slots(context, n_slots):
|
def step_n_slots(context, n_slots: int):
|
||||||
context.n_slots = n_slots
|
context.n_slots = n_slots
|
||||||
|
|
||||||
|
|
||||||
@step('{n_predict:d} server max tokens to predict')
|
@step('{n_predict:d} server max tokens to predict')
|
||||||
def step_server_n_predict(context, n_predict):
|
def step_server_n_predict(context, n_predict: int):
|
||||||
context.n_server_predict = n_predict
|
context.n_server_predict = n_predict
|
||||||
|
|
||||||
|
|
||||||
@step('{slot_save_path} as slot save path')
|
@step('{slot_save_path} as slot save path')
|
||||||
def step_slot_save_path(context, slot_save_path):
|
def step_slot_save_path(context, slot_save_path: str):
|
||||||
context.slot_save_path = slot_save_path
|
context.slot_save_path = slot_save_path
|
||||||
|
|
||||||
|
|
||||||
@step('using slot id {id_slot:d}')
|
@step('using slot id {id_slot:d}')
|
||||||
def step_id_slot(context, id_slot):
|
def step_id_slot(context, id_slot: int):
|
||||||
context.id_slot = id_slot
|
context.id_slot = id_slot
|
||||||
|
|
||||||
|
|
||||||
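The annotation changes applied throughout steps.py above do not alter behaviour: behave's `{name:d}` placeholders already deliver ints, and the added hints just make that visible to pyright. A short sketch of the annotated step style (step texts mirror the ones in the diff):

```python
# Typed behave step definitions; the pyright ignore mirrors the import used in steps.py.
from behave import step  # pyright: ignore[reportAttributeAccessIssue]


@step('{n_slots:d} slots')
def step_n_slots(context, n_slots: int):
    context.n_slots = n_slots


@step('a model alias {model_alias}')
def step_model_alias(context, model_alias: str):
    context.model_alias = model_alias
```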
@ -191,7 +194,7 @@ def step_start_server(context):
|
|||||||
|
|
||||||
@step("the server is {expecting_status}")
|
@step("the server is {expecting_status}")
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_wait_for_the_server_to_be_started(context, expecting_status):
|
async def step_wait_for_the_server_to_be_started(context, expecting_status: Literal['healthy', 'ready', 'idle', 'busy'] | str):
|
||||||
match expecting_status:
|
match expecting_status:
|
||||||
case 'healthy':
|
case 'healthy':
|
||||||
await wait_for_health_status(context, context.base_url, 200, 'ok',
|
await wait_for_health_status(context, context.base_url, 200, 'ok',
|
||||||
@ -221,7 +224,7 @@ async def step_wait_for_the_server_to_be_started(context, expecting_status):
|
|||||||
|
|
||||||
@step('all slots are {expected_slot_status_string}')
|
@step('all slots are {expected_slot_status_string}')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_all_slots_status(context, expected_slot_status_string):
|
async def step_all_slots_status(context, expected_slot_status_string: Literal['idle', 'busy'] | str):
|
||||||
match expected_slot_status_string:
|
match expected_slot_status_string:
|
||||||
case 'idle':
|
case 'idle':
|
||||||
expected_slot_status = 0
|
expected_slot_status = 0
|
||||||
@ -237,7 +240,7 @@ async def step_all_slots_status(context, expected_slot_status_string):
|
|||||||
|
|
||||||
@step('a completion request with {api_error} api error')
|
@step('a completion request with {api_error} api error')
|
||||||
@async_run_until_complete
|
@async_run_until_complete
|
||||||
async def step_request_completion(context, api_error):
|
async def step_request_completion(context, api_error: Literal['raised'] | str):
|
||||||
expect_api_error = api_error == 'raised'
|
expect_api_error = api_error == 'raised'
|
||||||
seeds = await completions_seed(context, num_seeds=1)
|
seeds = await completions_seed(context, num_seeds=1)
|
||||||
completion = await request_completion(context.prompts.pop(),
|
completion = await request_completion(context.prompts.pop(),
|
||||||
@ -777,8 +780,8 @@ def step_assert_metric_value(context, metric_name, metric_value):
|
|||||||
def step_available_models(context):
|
def step_available_models(context):
|
||||||
# openai client always expects an api_key
|
# openai client always expects an api_key
|
||||||
openai.api_key = context.user_api_key if context.user_api_key is not None else 'nope'
|
openai.api_key = context.user_api_key if context.user_api_key is not None else 'nope'
|
||||||
openai.api_base = f'{context.base_url}/v1'
|
openai.base_url = f'{context.base_url}/v1/'
|
||||||
context.models = openai.Model.list().data
|
context.models = openai.models.list().data
|
||||||
|
|
||||||
|
|
||||||
@step('{n_model:d} models are supported')
|
@step('{n_model:d} models are supported')
|
||||||
@ -789,7 +792,7 @@ def step_supported_models(context, n_model):
|
|||||||
|
|
||||||
|
|
||||||
@step('model {i_model:d} is {param} {preposition} {param_value}')
|
@step('model {i_model:d} is {param} {preposition} {param_value}')
|
||||||
def step_supported_models(context, i_model, param, preposition, param_value):
|
def step_supported_models(context, i_model: int, param: Literal['identified', 'trained'] | str, preposition: str, param_value: str):
|
||||||
assert i_model < len(context.models)
|
assert i_model < len(context.models)
|
||||||
model = context.models[i_model]
|
model = context.models[i_model]
|
||||||
|
|
||||||
@ -798,7 +801,7 @@ def step_supported_models(context, i_model, param, preposition, param_value):
|
|||||||
case 'identified':
|
case 'identified':
|
||||||
value = model.id
|
value = model.id
|
||||||
case 'trained':
|
case 'trained':
|
||||||
value = str(model.meta.n_ctx_train)
|
value = str(model.meta["n_ctx_train"])
|
||||||
case _:
|
case _:
|
||||||
assert False, "param {param} not supported"
|
assert False, "param {param} not supported"
|
||||||
assert param_value == value, f"model param {param} {value} != {param_value}"
|
assert param_value == value, f"model param {param} {value} != {param_value}"
|
||||||
@ -810,6 +813,7 @@ async def concurrent_requests(context, f_completion, *args, **kwargs):
|
|||||||
print(f"starting {context.n_prompts} concurrent completion requests...")
|
print(f"starting {context.n_prompts} concurrent completion requests...")
|
||||||
assert context.n_prompts > 0
|
assert context.n_prompts > 0
|
||||||
seeds = await completions_seed(context)
|
seeds = await completions_seed(context)
|
||||||
|
assert seeds is not None
|
||||||
for prompt_no in range(context.n_prompts):
|
for prompt_no in range(context.n_prompts):
|
||||||
shifted_args = [context.prompts.pop(), seeds[prompt_no], *args]
|
shifted_args = [context.prompts.pop(), seeds[prompt_no], *args]
|
||||||
context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
|
context.concurrent_tasks.append(asyncio.create_task(f_completion(*shifted_args, **kwargs)))
|
||||||
@ -861,7 +865,7 @@ async def request_completion(prompt,
|
|||||||
id_slot=None,
|
id_slot=None,
|
||||||
expect_api_error=None,
|
expect_api_error=None,
|
||||||
user_api_key=None,
|
user_api_key=None,
|
||||||
temperature=None):
|
temperature=None) -> int | dict[str, Any]:
|
||||||
if debug:
|
if debug:
|
||||||
print(f"Sending completion request: {prompt}")
|
print(f"Sending completion request: {prompt}")
|
||||||
origin = "my.super.domain"
|
origin = "my.super.domain"
|
||||||
@ -899,8 +903,8 @@ async def request_completion(prompt,
|
|||||||
async def oai_chat_completions(user_prompt,
|
async def oai_chat_completions(user_prompt,
|
||||||
seed,
|
seed,
|
||||||
system_prompt,
|
system_prompt,
|
||||||
base_url,
|
base_url: str,
|
||||||
base_path,
|
base_path: str,
|
||||||
async_client,
|
async_client,
|
||||||
debug=False,
|
debug=False,
|
||||||
temperature=None,
|
temperature=None,
|
||||||
@ -909,7 +913,7 @@ async def oai_chat_completions(user_prompt,
|
|||||||
enable_streaming=None,
|
enable_streaming=None,
|
||||||
response_format=None,
|
response_format=None,
|
||||||
user_api_key=None,
|
user_api_key=None,
|
||||||
expect_api_error=None):
|
expect_api_error=None) -> int | dict[str, Any]:
|
||||||
if debug:
|
if debug:
|
||||||
print(f"Sending OAI Chat completions request: {user_prompt}")
|
print(f"Sending OAI Chat completions request: {user_prompt}")
|
||||||
# openai client always expects an api key
|
# openai client always expects an api key
|
||||||
@ -989,32 +993,35 @@ async def oai_chat_completions(user_prompt,
|
|||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
openai.api_key = user_api_key
|
openai.api_key = user_api_key
|
||||||
openai.api_base = f'{base_url}{base_path}'
|
openai.base_url = f'{base_url}{base_path.removesuffix("chat")}'
|
||||||
chat_completion = openai.Completion.create(
|
assert model is not None
|
||||||
|
chat_completion = openai.chat.completions.create(
|
||||||
messages=payload['messages'],
|
messages=payload['messages'],
|
||||||
model=model,
|
model=model,
|
||||||
max_tokens=n_predict,
|
max_tokens=n_predict,
|
||||||
stream=enable_streaming,
|
stream=enable_streaming,
|
||||||
response_format=payload.get('response_format'),
|
response_format=payload.get('response_format') or openai.NOT_GIVEN,
|
||||||
seed=seed,
|
seed=seed,
|
||||||
temperature=payload['temperature']
|
temperature=payload['temperature']
|
||||||
)
|
)
|
||||||
except openai.error.AuthenticationError as e:
|
except openai.AuthenticationError as e:
|
||||||
if expect_api_error is not None and expect_api_error:
|
if expect_api_error is not None and expect_api_error:
|
||||||
return 401
|
return 401
|
||||||
else:
|
else:
|
||||||
assert False, f'error raised: {e}'
|
assert False, f'error raised: {e}'
|
||||||
|
|
||||||
if enable_streaming:
|
if enable_streaming:
|
||||||
|
chat_completion = cast(openai.Stream[ChatCompletionChunk], chat_completion)
|
||||||
for chunk in chat_completion:
|
for chunk in chat_completion:
|
||||||
assert len(chunk.choices) == 1
|
assert len(chunk.choices) == 1
|
||||||
delta = chunk.choices[0].delta
|
delta = chunk.choices[0].delta
|
||||||
if 'content' in delta:
|
if delta.content is not None:
|
||||||
completion_response['content'] += delta['content']
|
completion_response['content'] += delta.content
|
||||||
completion_response['timings']['predicted_n'] += 1
|
completion_response['timings']['predicted_n'] += 1
|
||||||
completion_response['truncated'] = chunk.choices[0].finish_reason != 'stop'
|
completion_response['truncated'] = chunk.choices[0].finish_reason != 'stop'
|
||||||
else:
|
else:
|
||||||
assert len(chat_completion.choices) == 1
|
assert len(chat_completion.choices) == 1
|
||||||
|
assert chat_completion.usage is not None
|
||||||
completion_response = {
|
completion_response = {
|
||||||
'content': chat_completion.choices[0].message.content,
|
'content': chat_completion.choices[0].message.content,
|
||||||
'timings': {
|
'timings': {
|
||||||
@ -1028,7 +1035,7 @@ async def oai_chat_completions(user_prompt,
|
|||||||
return completion_response
|
return completion_response
|
||||||
|
|
||||||
|
|
||||||
async def request_embedding(content, seed, base_url=None):
|
async def request_embedding(content, seed, base_url=None) -> list[list[float]]:
|
||||||
async with aiohttp.ClientSession() as session:
|
async with aiohttp.ClientSession() as session:
|
||||||
async with session.post(f'{base_url}/embedding',
|
async with session.post(f'{base_url}/embedding',
|
||||||
json={
|
json={
|
||||||
@ -1041,7 +1048,7 @@ async def request_embedding(content, seed, base_url=None):
|
|||||||
|
|
||||||
async def request_oai_embeddings(input, seed,
|
async def request_oai_embeddings(input, seed,
|
||||||
base_url=None, user_api_key=None,
|
base_url=None, user_api_key=None,
|
||||||
model=None, async_client=False):
|
model=None, async_client=False) -> list[list[float]]:
|
||||||
# openai client always expects an api_key
|
# openai client always expects an api_key
|
||||||
user_api_key = user_api_key if user_api_key is not None else 'nope'
|
user_api_key = user_api_key if user_api_key is not None else 'nope'
|
||||||
if async_client:
|
if async_client:
|
||||||
@ -1063,7 +1070,7 @@ async def request_oai_embeddings(input, seed,
|
|||||||
response_json = await response.json()
|
response_json = await response.json()
|
||||||
assert response_json['model'] == model, f"invalid model received: {response_json['model']}"
|
assert response_json['model'] == model, f"invalid model received: {response_json['model']}"
|
||||||
assert response_json['object'] == 'list'
|
assert response_json['object'] == 'list'
|
||||||
if isinstance(input, collections.abc.Sequence):
|
if isinstance(input, Sequence):
|
||||||
embeddings = []
|
embeddings = []
|
||||||
for an_oai_embeddings in response_json['data']:
|
for an_oai_embeddings in response_json['data']:
|
||||||
embeddings.append(an_oai_embeddings['embedding'])
|
embeddings.append(an_oai_embeddings['embedding'])
|
||||||
@ -1072,19 +1079,14 @@ async def request_oai_embeddings(input, seed,
|
|||||||
return embeddings
|
return embeddings
|
||||||
else:
|
else:
|
||||||
openai.api_key = user_api_key
|
openai.api_key = user_api_key
|
||||||
openai.api_base = f'{base_url}/v1'
|
openai.base_url = f'{base_url}/v1/'
|
||||||
oai_embeddings = openai.Embedding.create(
|
assert model is not None
|
||||||
|
oai_embeddings = openai.embeddings.create(
|
||||||
model=model,
|
model=model,
|
||||||
input=input,
|
input=input,
|
||||||
)
|
)
|
||||||
|
|
||||||
if isinstance(input, collections.abc.Sequence):
|
return [e.embedding for e in oai_embeddings.data]
|
||||||
embeddings = []
|
|
||||||
for an_oai_embeddings in oai_embeddings.data:
|
|
||||||
embeddings.append(an_oai_embeddings.embedding)
|
|
||||||
else:
|
|
||||||
embeddings = [oai_embeddings.data.embedding]
|
|
||||||
return embeddings
|
|
||||||
|
|
||||||
|
|
||||||
def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re_content=None):
|
def assert_n_tokens_predicted(completion_response, expected_predicted_n=None, re_content=None):
|
||||||
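With the 1.x OpenAI client used in the embeddings hunk above, the response data is a list of typed objects, so the per-item loop collapses to a comprehension. A minimal standalone sketch (base URL and model name are placeholders, and a running server is assumed):

```python
# Embeddings against the llama.cpp server's OpenAI-compatible endpoint, openai>=1.x style.
import openai

openai.api_key = "nope"                        # the server does not require a real key
openai.base_url = "http://localhost:8080/v1/"  # note the trailing slash

oai_embeddings = openai.embeddings.create(
    model="some-embedding-model",              # placeholder name
    input=["first prompt", "second prompt"],
)
embeddings = [e.embedding for e in oai_embeddings.data]
print(len(embeddings), len(embeddings[0]))
```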
@ -1343,7 +1345,7 @@ def start_server_background(context):
|
|||||||
}
|
}
|
||||||
context.server_process = subprocess.Popen(
|
context.server_process = subprocess.Popen(
|
||||||
[str(arg) for arg in [context.server_path, *server_args]],
|
[str(arg) for arg in [context.server_path, *server_args]],
|
||||||
**pkwargs)
|
**pkwargs) # pyright: ignore[reportArgumentType, reportCallIssue]
|
||||||
|
|
||||||
def server_log(in_stream, out_stream):
|
def server_log(in_stream, out_stream):
|
||||||
for line in iter(in_stream.readline, b''):
|
for line in iter(in_stream.readline, b''):
|
||||||
|
@ -1,6 +1,6 @@
 aiohttp~=3.9.3
 behave~=1.2.6
 huggingface_hub~=0.20.3
-numpy~=1.24.4
+numpy~=1.26.4
-openai~=0.25.0
+openai~=1.30.3
 prometheus-client~=0.20.0
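The bump to `openai~=1.30` is what drives the client-API changes in the test steps above: the module-level `Model`/`Completion`/`Embedding` classes are gone, configuration goes through `api_key`/`base_url`, and errors move to `openai.AuthenticationError`. A rough sketch of the 1.x usage the tests now rely on (URL and model name are placeholders):

```python
# openai 0.x -> 1.x migration in a nutshell, using the module-level client.
import openai

openai.api_key = "nope"                        # server ignores it unless --api-key is set
openai.base_url = "http://localhost:8080/v1/"  # replaces openai.api_base; trailing slash matters

models = openai.models.list().data             # replaces openai.Model.list()

completion = openai.chat.completions.create(   # replaces openai.Completion.create(...)
    model="placeholder-model",
    messages=[{"role": "user", "content": "Say hello"}],
    max_tokens=8,
)
print(completion.choices[0].message.content)
```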
|
@ -1,13 +1,15 @@
 import asyncio
+import asyncio.threads
 import requests
 import numpy as np


 n = 8

 result = []

 async def requests_post_async(*args, **kwargs):
-    return await asyncio.to_thread(requests.post, *args, **kwargs)
+    return await asyncio.threads.to_thread(requests.post, *args, **kwargs)

 async def main():
     model_url = "http://127.0.0.1:6900"
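The helper above just runs the blocking `requests.post` in a worker thread so many embedding requests can be awaited concurrently. A small self-contained sketch of the pattern (the port matches the script; no server is assumed to be running here, so the final call is left commented):

```python
# Fire several blocking HTTP requests concurrently from asyncio.
import asyncio
import requests


async def requests_post_async(*args, **kwargs):
    return await asyncio.to_thread(requests.post, *args, **kwargs)


async def main():
    url = "http://127.0.0.1:6900/embedding"
    tasks = [requests_post_async(url, json={"content": f"prompt {i}"}) for i in range(8)]
    responses = await asyncio.gather(*tasks)
    print([r.status_code for r in responses])

# asyncio.run(main())  # uncomment with a server listening on the port above
```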
@ -66,7 +66,7 @@ class Tensor:
             if len(self.ne) == 0:
                 self.nbytes = 0
             else:
-                self.nbytes = int(np.product(self.ne)) * 4
+                self.nbytes = int(np.prod(self.ne)) * 4
         else:
             raise ValueError(f"Unhandled data type '{self.dtype}'")

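`np.product` was a long-deprecated alias that NumPy 2.0 removes; `np.prod` is the supported spelling and returns the same value, which matters now that the test requirements pin a newer NumPy. A one-line check:

```python
# np.prod over the ggml ne dims times 4 bytes per float32 element.
import numpy as np

ne = [4, 32, 2]
nbytes = int(np.prod(ne)) * 4
print(nbytes)  # 1024
```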
|
20
flake.lock
20
flake.lock
@ -5,11 +5,11 @@
|
|||||||
"nixpkgs-lib": "nixpkgs-lib"
|
"nixpkgs-lib": "nixpkgs-lib"
|
||||||
},
|
},
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1717285511,
|
"lastModified": 1719994518,
|
||||||
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
|
"narHash": "sha256-pQMhCCHyQGRzdfAkdJ4cIWiw+JNuWsTX7f0ZYSyz0VY=",
|
||||||
"owner": "hercules-ci",
|
"owner": "hercules-ci",
|
||||||
"repo": "flake-parts",
|
"repo": "flake-parts",
|
||||||
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
|
"rev": "9227223f6d922fee3c7b190b2cc238a99527bbb7",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -20,11 +20,11 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs": {
|
"nixpkgs": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1719506693,
|
"lastModified": 1720031269,
|
||||||
"narHash": "sha256-C8e9S7RzshSdHB7L+v9I51af1gDM5unhJ2xO1ywxNH8=",
|
"narHash": "sha256-rwz8NJZV+387rnWpTYcXaRNvzUSnnF9aHONoJIYmiUQ=",
|
||||||
"owner": "NixOS",
|
"owner": "NixOS",
|
||||||
"repo": "nixpkgs",
|
"repo": "nixpkgs",
|
||||||
"rev": "b2852eb9365c6de48ffb0dc2c9562591f652242a",
|
"rev": "9f4128e00b0ae8ec65918efeba59db998750ead6",
|
||||||
"type": "github"
|
"type": "github"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
@ -36,14 +36,14 @@
|
|||||||
},
|
},
|
||||||
"nixpkgs-lib": {
|
"nixpkgs-lib": {
|
||||||
"locked": {
|
"locked": {
|
||||||
"lastModified": 1717284937,
|
"lastModified": 1719876945,
|
||||||
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
|
"narHash": "sha256-Fm2rDDs86sHy0/1jxTOKB1118Q0O3Uc7EC0iXvXKpbI=",
|
||||||
"type": "tarball",
|
"type": "tarball",
|
||||||
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
|
"url": "https://github.com/NixOS/nixpkgs/archive/5daf0514482af3f97abaefc78a6606365c9108e2.tar.gz"
|
||||||
},
|
},
|
||||||
"original": {
|
"original": {
|
||||||
"type": "tarball",
|
"type": "tarball",
|
||||||
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
|
"url": "https://github.com/NixOS/nixpkgs/archive/5daf0514482af3f97abaefc78a6606365c9108e2.tar.gz"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"root": {
|
"root": {
|
||||||
|
@ -104,7 +104,7 @@ option(GGML_ACCELERATE "ggml: enable Accelerate framework"
|
|||||||
option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT})
|
option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT})
|
||||||
set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING
|
set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING
|
||||||
"ggml: BLAS library vendor")
|
"ggml: BLAS library vendor")
|
||||||
option(GGML_LLAMAFILE "ggml: use ggml SGEMM" OFF)
|
option(GGML_LLAMAFILE "ggml: use LLAMAFILE" OFF)
|
||||||
|
|
||||||
option(GGML_CUDA "ggml: use CUDA" OFF)
|
option(GGML_CUDA "ggml: use CUDA" OFF)
|
||||||
option(GGML_CUDA_FORCE_DMMV "ggml: use dmmv instead of mmvq CUDA kernels" OFF)
|
option(GGML_CUDA_FORCE_DMMV "ggml: use dmmv instead of mmvq CUDA kernels" OFF)
|
||||||
|
@ -99,6 +99,8 @@ async def main():
|
|||||||
|
|
||||||
tasks = []
|
tasks = []
|
||||||
|
|
||||||
|
base_dict = {"FLOAT_TYPE": "float"}
|
||||||
|
|
||||||
for fp16 in (False, True):
|
for fp16 in (False, True):
|
||||||
# MUL_MAT
|
# MUL_MAT
|
||||||
matmul_shaders(tasks, fp16, False)
|
matmul_shaders(tasks, fp16, False)
|
||||||
@ -106,8 +108,6 @@ async def main():
|
|||||||
matmul_shaders(tasks, fp16, True)
|
matmul_shaders(tasks, fp16, True)
|
||||||
|
|
||||||
for tname in type_names:
|
for tname in type_names:
|
||||||
base_dict = {"FLOAT_TYPE": "float"}
|
|
||||||
|
|
||||||
# mul mat vec
|
# mul mat vec
|
||||||
data_a_key = f"DATA_A_{tname.upper()}"
|
data_a_key = f"DATA_A_{tname.upper()}"
|
||||||
shader = f"mul_mat_vec_{tname}.comp" if tname.endswith("_k") else "mul_mat_vec.comp"
|
shader = f"mul_mat_vec_{tname}.comp" if tname.endswith("_k") else "mul_mat_vec.comp"
|
||||||
|
@ -383,6 +383,9 @@ extern "C" {
|
|||||||
GGML_TYPE_F64 = 28,
|
GGML_TYPE_F64 = 28,
|
||||||
GGML_TYPE_IQ1_M = 29,
|
GGML_TYPE_IQ1_M = 29,
|
||||||
GGML_TYPE_BF16 = 30,
|
GGML_TYPE_BF16 = 30,
|
||||||
|
GGML_TYPE_Q4_0_4_4 = 31,
|
||||||
|
GGML_TYPE_Q4_0_4_8 = 32,
|
||||||
|
GGML_TYPE_Q4_0_8_8 = 33,
|
||||||
GGML_TYPE_COUNT,
|
GGML_TYPE_COUNT,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -424,6 +427,9 @@ extern "C" {
|
|||||||
GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors
|
GGML_FTYPE_MOSTLY_IQ4_XS = 22, // except 1d tensors
|
||||||
GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors
|
GGML_FTYPE_MOSTLY_IQ1_M = 23, // except 1d tensors
|
||||||
GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors
|
GGML_FTYPE_MOSTLY_BF16 = 24, // except 1d tensors
|
||||||
|
GGML_FTYPE_MOSTLY_Q4_0_4_4 = 25, // except 1d tensors
|
||||||
|
GGML_FTYPE_MOSTLY_Q4_0_4_8 = 26, // except 1d tensors
|
||||||
|
GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27, // except 1d tensors
|
||||||
};
|
};
|
||||||
|
|
||||||
// available tensor operations:
|
// available tensor operations:
|
||||||
@ -2406,6 +2412,12 @@ extern "C" {
|
|||||||
typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
||||||
typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
|
typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
|
||||||
const void * GGML_RESTRICT y, size_t by, int nrc);
|
const void * GGML_RESTRICT y, size_t by, int nrc);
|
||||||
|
typedef void (*ggml_from_float_to_mat_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nr,
|
||||||
|
int64_t k, int64_t bx);
|
||||||
|
typedef void (*ggml_gemv_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x,
|
||||||
|
const void * GGML_RESTRICT y, int nr, int nc);
|
||||||
|
typedef void (*ggml_gemm_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x,
|
||||||
|
const void * GGML_RESTRICT y, int nr, int nc);
|
||||||
|
|
||||||
typedef struct {
|
typedef struct {
|
||||||
const char * type_name;
|
const char * type_name;
|
||||||
@ -2418,6 +2430,11 @@ extern "C" {
|
|||||||
ggml_vec_dot_t vec_dot;
|
ggml_vec_dot_t vec_dot;
|
||||||
enum ggml_type vec_dot_type;
|
enum ggml_type vec_dot_type;
|
||||||
int64_t nrows; // number of rows to process simultaneously;
|
int64_t nrows; // number of rows to process simultaneously;
|
||||||
|
int64_t ncols; // number of columns to process simultaneously;
|
||||||
|
int64_t interleave_blcksize; // interleave elements in blocks of interleave_blcksize;
|
||||||
|
ggml_from_float_to_mat_t from_float_to_mat;
|
||||||
|
ggml_gemv_t gemv;
|
||||||
|
ggml_gemm_t gemm;
|
||||||
} ggml_type_traits_t;
|
} ggml_type_traits_t;
|
||||||
|
|
||||||
GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
|
GGML_API ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);
|
||||||
|
@ -238,12 +238,12 @@ if (GGML_BLAS)
|
|||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (GGML_LLAMAFILE)
|
if (GGML_LLAMAFILE)
|
||||||
message(STATUS "Using ggml SGEMM")
|
message(STATUS "Using llamafile")
|
||||||
|
|
||||||
add_compile_definitions(GGML_USE_LLAMAFILE)
|
add_compile_definitions(GGML_USE_LLAMAFILE)
|
||||||
|
|
||||||
set(GGML_HEADERS_LLAMAFILE sgemm.h)
|
set(GGML_HEADERS_LLAMAFILE llamafile/sgemm.h)
|
||||||
set(GGML_SOURCES_LLAMAFILE sgemm.cpp)
|
set(GGML_SOURCES_LLAMAFILE llamafile/sgemm.cpp)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if (GGML_CUDA)
|
if (GGML_CUDA)
|
||||||
@ -1153,6 +1153,7 @@ add_library(ggml
|
|||||||
${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
|
${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
|
||||||
${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS}
|
${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS}
|
||||||
${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}
|
${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}
|
||||||
|
ggml-aarch64.c ggml-aarch64.h
|
||||||
)
|
)
|
||||||
|
|
||||||
if (EMSCRIPTEN)
|
if (EMSCRIPTEN)
|
||||||
|
2187
ggml/src/ggml-aarch64.c
Normal file
2187
ggml/src/ggml-aarch64.c
Normal file
File diff suppressed because it is too large
Load Diff
39
ggml/src/ggml-aarch64.h
Normal file
39
ggml/src/ggml-aarch64.h
Normal file
@ -0,0 +1,39 @@
|
|||||||
|
// SPDX-FileCopyrightText: Copyright 2024 Arm Ltd.
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#define GGML_COMMON_DECL_C
|
||||||
|
#include "ggml-common.h"
|
||||||
|
|
||||||
|
#include "ggml.h"
|
||||||
|
|
||||||
|
// GGML internal header
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
extern "C" {
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Quantization
|
||||||
|
void quantize_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
||||||
|
void quantize_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
|
||||||
|
|
||||||
|
void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nrows, int64_t n_per_row, int64_t interleave_blcksize);
|
||||||
|
|
||||||
|
// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization")
|
||||||
|
size_t quantize_q4_0_4x4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
||||||
|
size_t quantize_q4_0_4x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
||||||
|
size_t quantize_q4_0_8x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
|
||||||
|
|
||||||
|
// GEMV
|
||||||
|
void ggml_gemv_q4_0_4x4_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
void ggml_gemv_q4_0_4x8_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
void ggml_gemv_q4_0_8x8_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
|
||||||
|
// GEMM
|
||||||
|
void ggml_gemm_q4_0_4x4_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
void ggml_gemm_q4_0_4x8_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
void ggml_gemm_q4_0_8x8_q8_0 (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
|
||||||
|
|
||||||
|
#ifdef __cplusplus
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
@ -199,6 +199,30 @@ typedef struct {
|
|||||||
} block_q8_1;
|
} block_q8_1;
|
||||||
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding");
|
static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding");
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
ggml_half d[4]; // deltas for 4 q4_0 blocks
|
||||||
|
uint8_t qs[QK4_0 * 2]; // nibbles / quants for 4 q4_0 blocks
|
||||||
|
} block_q4_0x4;
|
||||||
|
static_assert(sizeof(block_q4_0x4) == 4 * sizeof(ggml_half) + QK4_0 * 2, "wrong q4_0x4 block size/padding");
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
ggml_half d[8]; // deltas for 8 q4_0 blocks
|
||||||
|
uint8_t qs[QK4_0 * 4]; // nibbles / quants for 8 q4_0 blocks
|
||||||
|
} block_q4_0x8;
|
||||||
|
static_assert(sizeof(block_q4_0x8) == 8 * sizeof(ggml_half) + QK4_0 * 4, "wrong q4_0x8 block size/padding");
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
ggml_half d[4]; // deltas for 4 q8_0 blocks
|
||||||
|
int8_t qs[QK8_0 * 4]; // quants for 4 q8_0 blocks
|
||||||
|
} block_q8_0x4;
|
||||||
|
static_assert(sizeof(block_q8_0x4) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong q8_0x4 block size/padding");
|
||||||
|
|
||||||
|
typedef struct {
|
||||||
|
ggml_half d[8]; // deltas for 8 q8_0 blocks
|
||||||
|
int8_t qs[QK8_0 * 8]; // quants for 8 q8_0 blocks
|
||||||
|
} block_q8_0x8;
|
||||||
|
static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong q8_0x8 block size/padding");
|
||||||
|
|
||||||
//
|
//
|
||||||
// Super-block quantization structures
|
// Super-block quantization structures
|
||||||
//
|
//
|
||||||
|
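The new interleaved block layouts pack 4 or 8 plain Q4_0/Q8_0 blocks (deltas first, then quants) so the aarch64 GEMM/GEMV kernels can load them contiguously. A quick numeric check of the `static_assert` sizes above, assuming `QK4_0 = QK8_0 = 32` and a 2-byte `ggml_half` as elsewhere in ggml-common.h:

```python
# Sizes implied by the struct definitions and their static_asserts.
QK4_0 = 32
QK8_0 = 32
HALF = 2  # sizeof(ggml_half)

sizeof_block_q4_0x4 = 4 * HALF + QK4_0 * 2   # 4 deltas + packed nibbles of 4 blocks
sizeof_block_q4_0x8 = 8 * HALF + QK4_0 * 4   # 8 deltas + packed nibbles of 8 blocks
sizeof_block_q8_0x4 = 4 * HALF + QK8_0 * 4   # 4 deltas + int8 quants of 4 blocks
sizeof_block_q8_0x8 = 8 * HALF + QK8_0 * 8   # 8 deltas + int8 quants of 8 blocks

print(sizeof_block_q4_0x4, sizeof_block_q4_0x8, sizeof_block_q8_0x4, sizeof_block_q8_0x8)
# 72 144 136 272
```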
@ -29,6 +29,7 @@
|
|||||||
#include "ggml-cuda/tsembd.cuh"
|
#include "ggml-cuda/tsembd.cuh"
|
||||||
#include "ggml-cuda/unary.cuh"
|
#include "ggml-cuda/unary.cuh"
|
||||||
#include "ggml-cuda/upscale.cuh"
|
#include "ggml-cuda/upscale.cuh"
|
||||||
|
#include "ggml-cuda/conv-transpose-1d.cuh"
|
||||||
|
|
||||||
#include <algorithm>
|
#include <algorithm>
|
||||||
#include <array>
|
#include <array>
|
||||||
@ -2262,6 +2263,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
|
|||||||
case GGML_OP_IM2COL:
|
case GGML_OP_IM2COL:
|
||||||
ggml_cuda_op_im2col(ctx, dst);
|
ggml_cuda_op_im2col(ctx, dst);
|
||||||
break;
|
break;
|
||||||
|
case GGML_OP_CONV_TRANSPOSE_1D:
|
||||||
|
ggml_cuda_op_conv_transpose_1d(ctx,dst);
|
||||||
|
break;
|
||||||
case GGML_OP_POOL_2D:
|
case GGML_OP_POOL_2D:
|
||||||
ggml_cuda_op_pool2d(ctx, dst);
|
ggml_cuda_op_pool2d(ctx, dst);
|
||||||
break;
|
break;
|
||||||
@ -2805,6 +2809,15 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
|
|||||||
ggml_type src0_type = op->src[0]->type;
|
ggml_type src0_type = op->src[0]->type;
|
||||||
return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
|
return src0_type != GGML_TYPE_I32 && src0_type != GGML_TYPE_I16;
|
||||||
} break;
|
} break;
|
||||||
|
case GGML_OP_CONV_TRANSPOSE_1D:
|
||||||
|
{
|
||||||
|
ggml_type src0_type = op->src[0]->type;
|
||||||
|
ggml_type src1_type = op->src[1]->type;
|
||||||
|
if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
} break;
|
||||||
case GGML_OP_NONE:
|
case GGML_OP_NONE:
|
||||||
case GGML_OP_RESHAPE:
|
case GGML_OP_RESHAPE:
|
||||||
case GGML_OP_VIEW:
|
case GGML_OP_VIEW:
|
||||||
|
87
ggml/src/ggml-cuda/conv-transpose-1d.cu
Normal file
87
ggml/src/ggml-cuda/conv-transpose-1d.cu
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
#include "conv-transpose-1d.cuh"
|
||||||
|
|
||||||
|
static __global__ void conv_transpose_1d_kernel(
|
||||||
|
const int s0, const int p0, const int d0, const int output_size,
|
||||||
|
const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
|
||||||
|
const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
|
||||||
|
const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
|
||||||
|
const float * src0, const float * src1, float * dst) {
|
||||||
|
int global_index = threadIdx.x + blockIdx.x * blockDim.x;
|
||||||
|
if (global_index >= output_size) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
int out_index = global_index / dst_ne0;
|
||||||
|
|
||||||
|
float accumulator = 0;
|
||||||
|
|
||||||
|
for (int c = 0; c < src0_ne2; c++) {
|
||||||
|
int idx = global_index % dst_ne0;
|
||||||
|
|
||||||
|
int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0);
|
||||||
|
int input_offset = src1_ne0 * c;
|
||||||
|
|
||||||
|
for (int i = 0; i < src1_ne0; i++) {
|
||||||
|
if (!(idx >= i*s0 && idx < i*s0 + src0_ne0)) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
int weight_idx = idx - i*s0;
|
||||||
|
|
||||||
|
float kernel_weight = src0[kernel_offset + weight_idx];
|
||||||
|
float input_value = src1[input_offset+i];
|
||||||
|
|
||||||
|
accumulator += kernel_weight * input_value;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dst[global_index] = accumulator;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void conv_transpose_1d_f32_f32_cuda(
|
||||||
|
const int s0, const int p0, const int d0, const int output_size,
|
||||||
|
const int src0_ne0, const int src0_ne1, const int src0_ne2, const int src0_ne3,
|
||||||
|
const int src1_ne0, const int src1_ne1, const int src1_ne2, const int src1_ne3,
|
||||||
|
const int dst_ne0, const int dst_ne1, const int dst_ne2, const int dst_ne3,
|
||||||
|
const float * src0, const float * src1, float * dst,
|
||||||
|
cudaStream_t stream) {
|
||||||
|
|
||||||
|
const int num_blocks = (output_size + CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE;
|
||||||
|
conv_transpose_1d_kernel<<<num_blocks,CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE, 0, stream>>>(
|
||||||
|
s0,p0,d0,output_size,
|
||||||
|
src0_ne0, src0_ne1, src0_ne2, src0_ne3,
|
||||||
|
src1_ne0, src1_ne1, src1_ne2, src1_ne3,
|
||||||
|
dst_ne0, dst_ne1, dst_ne2, dst_ne3,
|
||||||
|
src0,src1, dst);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
|
||||||
|
const ggml_tensor * src0 = dst->src[0];
|
||||||
|
const float * src0_d = (const float *)src0->data;
|
||||||
|
|
||||||
|
const ggml_tensor * src1 = dst->src[1];
|
||||||
|
const float * src1_d = (const float *)src1->data;
|
||||||
|
|
||||||
|
float * dst_d = (float *)dst->data;
|
||||||
|
cudaStream_t stream = ctx.stream();
|
||||||
|
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32);
|
||||||
|
GGML_ASSERT( dst->type == GGML_TYPE_F32);
|
||||||
|
|
||||||
|
GGML_ASSERT(ggml_is_contiguous(src0));
|
||||||
|
GGML_ASSERT(ggml_is_contiguous(src1));
|
||||||
|
|
||||||
|
const int32_t * opts = (const int32_t *)dst->op_params;
|
||||||
|
|
||||||
|
const int s0 = opts[0];
|
||||||
|
const int p0 = 0;//opts[3];
|
||||||
|
const int d0 = 1;//opts[4];
|
||||||
|
|
||||||
|
const int64_t kernel_size = ggml_nelements(src0);
|
||||||
|
const int64_t input_size = ggml_nelements(src1);
|
||||||
|
const int64_t output_size = ggml_nelements(dst);
|
||||||
|
|
||||||
|
conv_transpose_1d_f32_f32_cuda(s0, p0, d0, output_size,
|
||||||
|
src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
|
||||||
|
src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
|
||||||
|
dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
|
||||||
|
src0_d, src1_d, dst_d, stream);
|
||||||
|
}
|
5
ggml/src/ggml-cuda/conv-transpose-1d.cuh
Normal file
5
ggml/src/ggml-cuda/conv-transpose-1d.cuh
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
#include "common.cuh"
|
||||||
|
|
||||||
|
#define CUDA_CONV_TRANPOSE_1D_BLOCK_SIZE 256
|
||||||
|
|
||||||
|
void ggml_cuda_op_conv_transpose_1d(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
|
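To make the indexing in the new CUDA kernel easier to follow, here is a rough NumPy reference for a single batch. It assumes ggml's ne ordering gives kernel shape (K, OC, C), input shape (L, C), output shape (OL, OC) with stride `s0`, padding 0, dilation 1 and `OL = (L - 1) * s0 + K`; this is an illustrative sketch, not code from the patch:

```python
# Readable reference for conv_transpose_1d: one output element per (position, out-channel).
import numpy as np


def conv_transpose_1d_ref(kernel: np.ndarray, inp: np.ndarray, s0: int) -> np.ndarray:
    K, OC, C = kernel.shape        # src0_ne0, src0_ne1, src0_ne2
    L, C2 = inp.shape              # src1_ne0, src1_ne1
    assert C == C2
    OL = (L - 1) * s0 + K          # dst_ne0 (p0 = 0, d0 = 1 assumed)
    out = np.zeros((OL, OC), dtype=np.float32)
    for oc in range(OC):           # "out_index" in the CUDA kernel
        for idx in range(OL):      # position within the output row
            acc = 0.0
            for c in range(C):
                for i in range(L):
                    if i * s0 <= idx < i * s0 + K:
                        acc += kernel[idx - i * s0, oc, c] * inp[i, c]
            out[idx, oc] = acc
    return out


# Tiny smoke test: stride 1, one channel, kernel that copies its input.
k = np.zeros((3, 1, 1), dtype=np.float32); k[0, 0, 0] = 1.0
x = np.arange(4, dtype=np.float32).reshape(4, 1)
print(conv_transpose_1d_ref(k, x, s0=1).ravel())  # [0. 1. 2. 3. 0. 0.]
```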
@ -609,6 +609,10 @@ static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {

#endif // defined(__ARM_NEON) && (!defined(__MSC_VER)

+#ifdef __ARM_FEATURE_SVE
+#include <arm_sve.h>
+#endif // __ARM_FEATURE_SVE
+
// precomputed f32 table for f16 (256 KB)
// defined in ggml.c, initialized in ggml_init()
extern float ggml_table_f32_f16[1 << 16];

@ -3814,6 +3814,7 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
    }
#endif
#if defined(__ARM_FEATURE_SVE)
+    if (svcntb() == QK8_0) {
    const svbool_t ptrueh = svptrue_pat_b8(SV_VL16);
    const svbool_t ptruel = svnot_b_z(svptrue_b8(), ptrueh);

@ -3850,7 +3851,10 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
    }

    *s = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
-#elif defined(__ARM_NEON)
+    return;
+    }
+#endif
+#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);

@ -5422,6 +5426,7 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
    }
#endif
#if defined(__ARM_FEATURE_SVE)
+    if (svcntb() == QK8_0) {
    svfloat32_t sumv0 = svdup_n_f32(0.0f);
    svfloat32_t sumv1 = svdup_n_f32(0.0f);

@ -5446,7 +5451,10 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r
    }

    *s = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
-#elif defined(__ARM_NEON)
+    return;
+    }
+#endif
+#if defined(__ARM_NEON)
    float32x4_t sumv0 = vdupq_n_f32(0.0f);
    float32x4_t sumv1 = vdupq_n_f32(0.0f);
@ -14760,6 +14768,16 @@ static bool validate_fp16(ggml_fp16_t f, size_t i) {
        } \
    }

+#define VALIDATE_ROW_DATA_DVEC_F16_IMPL(type, data, nb, nr) \
+    const type * q = (const type *) (data); \
+    for (size_t i = 0; i < (nb); ++i) { \
+        for (size_t j = 0; j < (nr); ++j) { \
+            if (!validate_fp16(q[i].d[j], i)) { \
+                return false; \
+            } \
+        } \
+    }
+
bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbytes) {
    if (type < 0 || type >= GGML_TYPE_COUNT) {
        fprintf(stderr, "%s: invalid type %d\n", __func__, type);

@ -14977,6 +14995,16 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
            {
                VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
            } break;
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+            {
+                VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x4, data, nbytes / sizeof(block_q4_0x4), 4);
+            } break;
+        case GGML_TYPE_Q4_0_8_8:
+            {
+                VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x8, data, nbytes / sizeof(block_q4_0x8), 8);
+            } break;
+
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:
@ -3658,6 +3658,10 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor
    use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
#endif // SYCL_USE_XMX

+    // mmvq path is faster in the CUDA backend.
+    if (ctx.stream()->get_backend() == sycl::backend::ext_oneapi_cuda)
+        use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
+
    if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
        // KQ single-batch
        ggml_sycl_mul_mat_vec_p021(ctx, src0, src1, dst);

@ -346,4 +346,10 @@ inline sycl::vec<Tp, n> vec_aligned_load(const Tp* aligned_ptr) {
    return *reinterpret_cast<const sycl::vec<Tp, n>*>(aligned_ptr);
}

+// Helper for accessing pointers with no warnings
+template <typename Tp, int dim>
+static __dpct_inline__ Tp* get_pointer(sycl::local_accessor<Tp, dim> acc) {
+    return acc.template get_multi_ptr<sycl::access::decorated::no>().get();
+}
+
#endif // GGML_SYCL_COMMON_HPP

@ -158,7 +158,7 @@ static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int k,
                    sycl::range<3>(1, 1, 32),
                    sycl::range<3>(1, 1, 32)),
                [=](sycl::nd_item<3> item_ct1) {
-                    dequantize_block_q4_K(vx, y, scale_local_acc.get_pointer(), item_ct1);
+                    dequantize_block_q4_K(vx, y, get_pointer(scale_local_acc), item_ct1);
                });
        });
}
@ -1835,10 +1835,10 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q4_0_acc_ct1.get_pointer(),
-                       tile_x_d_q4_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q4_0_acc_ct1),
+                       get_pointer(tile_x_d_q4_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -1870,10 +1870,10 @@ static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q4_0_acc_ct1.get_pointer(),
-                       tile_x_d_q4_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q4_0_acc_ct1),
+                       get_pointer(tile_x_d_q4_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -1950,10 +1950,10 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_1<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q4_1_acc_ct1.get_pointer(),
-                       tile_x_dm_q4_1_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q4_1_acc_ct1),
+                       get_pointer(tile_x_dm_q4_1_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -1985,10 +1985,10 @@ static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_1<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q4_1_acc_ct1.get_pointer(),
-                       tile_x_dm_q4_1_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q4_1_acc_ct1),
+                       get_pointer(tile_x_dm_q4_1_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2065,10 +2065,10 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_0_acc_ct1.get_pointer(),
-                       tile_x_d_q5_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_0_acc_ct1),
+                       get_pointer(tile_x_d_q5_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2100,10 +2100,10 @@ static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_0_acc_ct1.get_pointer(),
-                       tile_x_d_q5_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_0_acc_ct1),
+                       get_pointer(tile_x_d_q5_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2180,10 +2180,10 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_1<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_1_acc_ct1.get_pointer(),
-                       tile_x_dm_q5_1_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_1_acc_ct1),
+                       get_pointer(tile_x_dm_q5_1_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2215,10 +2215,10 @@ static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_1<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_1_acc_ct1.get_pointer(),
-                       tile_x_dm_q5_1_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_1_acc_ct1),
+                       get_pointer(tile_x_dm_q5_1_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2295,10 +2295,10 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q8_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q8_0_acc_ct1.get_pointer(),
-                       tile_x_d_q8_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q8_0_acc_ct1),
+                       get_pointer(tile_x_d_q8_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2330,10 +2330,10 @@ static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q8_0<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_qs_q8_0_acc_ct1.get_pointer(),
-                       tile_x_d_q8_0_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_qs_q8_0_acc_ct1),
+                       get_pointer(tile_x_d_q8_0_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2412,11 +2412,11 @@ static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q2_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q2_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q2_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q2_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q2_K_acc_ct1),
+                       get_pointer(tile_x_dm_q2_K_acc_ct1),
+                       get_pointer(tile_x_sc_q2_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2450,11 +2450,11 @@ static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q2_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q2_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q2_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q2_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q2_K_acc_ct1),
+                       get_pointer(tile_x_dm_q2_K_acc_ct1),
+                       get_pointer(tile_x_sc_q2_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2537,12 +2537,12 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q3_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q3_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q3_K_acc_ct1.get_pointer(),
-                       tile_x_qh_q3_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q3_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q3_K_acc_ct1),
+                       get_pointer(tile_x_dm_q3_K_acc_ct1),
+                       get_pointer(tile_x_qh_q3_K_acc_ct1),
+                       get_pointer(tile_x_sc_q3_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2578,12 +2578,12 @@ static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q3_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q3_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q3_K_acc_ct1.get_pointer(),
-                       tile_x_qh_q3_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q3_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q3_K_acc_ct1),
+                       get_pointer(tile_x_dm_q3_K_acc_ct1),
+                       get_pointer(tile_x_qh_q3_K_acc_ct1),
+                       get_pointer(tile_x_sc_q3_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2663,11 +2663,11 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q4_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q4_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q4_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q4_K_acc_ct1),
+                       get_pointer(tile_x_dm_q4_K_acc_ct1),
+                       get_pointer(tile_x_sc_q4_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2701,11 +2701,11 @@ static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q4_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q4_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q4_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q4_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q4_K_acc_ct1),
+                       get_pointer(tile_x_dm_q4_K_acc_ct1),
+                       get_pointer(tile_x_sc_q4_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2784,11 +2784,11 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q5_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q5_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_K_acc_ct1),
+                       get_pointer(tile_x_dm_q5_K_acc_ct1),
+                       get_pointer(tile_x_sc_q5_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2822,11 +2822,11 @@ static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q5_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_q5_K_acc_ct1.get_pointer(),
-                       tile_x_dm_q5_K_acc_ct1.get_pointer(),
-                       tile_x_sc_q5_K_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_q5_K_acc_ct1),
+                       get_pointer(tile_x_dm_q5_K_acc_ct1),
+                       get_pointer(tile_x_sc_q5_K_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2905,11 +2905,11 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q6_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_acc_ct1.get_pointer(),
-                       tile_x_dm_acc_ct1.get_pointer(),
-                       tile_x_sc_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_acc_ct1),
+                       get_pointer(tile_x_dm_acc_ct1),
+                       get_pointer(tile_x_sc_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }

@ -2943,11 +2943,11 @@ static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy,
                    mul_mat_q6_K<need_check>(
                        vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
                        nrows_dst, item_ct1,
-                       tile_x_ql_acc_ct1.get_pointer(),
-                       tile_x_dm_acc_ct1.get_pointer(),
-                       tile_x_sc_acc_ct1.get_pointer(),
-                       tile_y_qs_acc_ct1.get_pointer(),
-                       tile_y_ds_acc_ct1.get_pointer());
+                       get_pointer(tile_x_ql_acc_ct1),
+                       get_pointer(tile_x_dm_acc_ct1),
+                       get_pointer(tile_x_sc_acc_ct1),
+                       get_pointer(tile_y_qs_acc_ct1),
+                       get_pointer(tile_y_ds_acc_ct1));
                });
        });
    }
@ -218,7 +218,7 @@ static void norm_f32_sycl(const float* x, float* dst, const int ncols,
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(WARP_SIZE)]] {
                        norm_f32(x, dst, ncols, eps, item_ct1,
-                                s_sum_acc_ct1.get_pointer(), work_group_size);
+                                get_pointer(s_sum_acc_ct1), work_group_size);
                    });
        });
    }

@ -265,7 +265,7 @@ static void group_norm_f32_sycl(const float* x, float* dst,
                    [[intel::reqd_sub_group_size(WARP_SIZE)]] {
                        group_norm_f32(x, dst, group_size, ne_elements,
                                       eps_ct4, item_ct1,
-                                      s_sum_acc_ct1.get_pointer(), work_group_size);
+                                      get_pointer(s_sum_acc_ct1), work_group_size);
                    });
        });
    }

@ -306,7 +306,7 @@ static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols,
                [=](sycl::nd_item<3> item_ct1)
                    [[intel::reqd_sub_group_size(WARP_SIZE)]] {
                        rms_norm_f32(x, dst, ncols, eps, item_ct1,
-                                    s_sum_acc_ct1.get_pointer(), work_group_size);
+                                    get_pointer(s_sum_acc_ct1), work_group_size);
                    });
        });
    }

@ -55,7 +55,7 @@ static void rope_norm(
    const int i  = row*ne0 + i0;
    const int i2 = row/p_delta_rows;

-    const float theta_base = pos[i2]*powf(theta_scale, i0/2.0f);
+    const float theta_base = pos[i2] * sycl::pow(theta_scale, i0 / 2.0f);

    const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;

@ -98,7 +98,7 @@ static void rope_neox(
    const int i  = row*ne0 + i0/2;
    const int i2 = row/p_delta_rows;

-    const float theta_base = pos[i2]*powf(theta_scale, i0/2.0f);
+    const float theta_base = pos[i2] * sycl::pow(theta_scale, i0 / 2.0f);

    const float freq_factor = has_ff ? freq_factors[i0/2] : 1.0f;

@ -136,7 +136,7 @@ static void soft_max_f32_submitter(const float * x, const float * mask, float *
                        soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
                                                                                     nrows_y, scale, max_bias, m0,
                                                                                     m1, n_head_log2, item_ct1,
-                                                                                    local_buf_acc.get_pointer());
+                                                                                    get_pointer(local_buf_acc));
                    });
        });
    }
153 ggml/src/ggml.c
@ -4,7 +4,7 @@
#include "ggml-impl.h"
#include "ggml-quants.h"
#include "ggml.h"
+#include "ggml-aarch64.h"

#if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW

@ -37,12 +37,12 @@
#include <unistd.h>
#endif

-#ifdef __ARM_FEATURE_MATMUL_INT8
+#if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8)
#undef GGML_USE_LLAMAFILE
#endif

#ifdef GGML_USE_LLAMAFILE
-#include "sgemm.h"
+#include <llamafile/sgemm.h>
#endif

#if defined(_MSC_VER)

@ -692,6 +692,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
#else
        .nrows = 1,
#endif
+        .from_float_to_mat = quantize_mat_q8_0,
    },
    [GGML_TYPE_Q8_1] = {
        .type_name = "q8_1",

@ -889,6 +890,54 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
        .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
        .vec_dot_type = GGML_TYPE_BF16,
        .nrows = 1,
+    },
+    [GGML_TYPE_Q4_0_4_4] = {
+        .type_name = "q4_0_4x4",
+        .blck_size = QK4_0,
+        .type_size = sizeof(block_q4_0),
+        .is_quantized = true,
+        .to_float = NULL,
+        .from_float = NULL,
+        .from_float_reference = NULL,
+        .vec_dot = NULL,
+        .vec_dot_type = GGML_TYPE_Q8_0,
+        .nrows = 1,
+        .ncols = 4,
+        .interleave_blcksize = 4,
+        .gemv = ggml_gemv_q4_0_4x4_q8_0,
+        .gemm = ggml_gemm_q4_0_4x4_q8_0,
+    },
+    [GGML_TYPE_Q4_0_4_8] = {
+        .type_name = "q4_0_4x8",
+        .blck_size = QK4_0,
+        .type_size = sizeof(block_q4_0),
+        .is_quantized = true,
+        .to_float = NULL,
+        .from_float = NULL,
+        .from_float_reference = NULL,
+        .vec_dot = NULL,
+        .vec_dot_type = GGML_TYPE_Q8_0,
+        .nrows = 1,
+        .ncols = 4,
+        .interleave_blcksize = 8,
+        .gemv = ggml_gemv_q4_0_4x8_q8_0,
+        .gemm = ggml_gemm_q4_0_4x8_q8_0,
+    },
+    [GGML_TYPE_Q4_0_8_8] = {
+        .type_name = "q4_0_8x8",
+        .blck_size = QK4_0,
+        .type_size = sizeof(block_q4_0),
+        .is_quantized = true,
+        .to_float = NULL,
+        .from_float = NULL,
+        .from_float_reference = NULL,
+        .vec_dot = NULL,
+        .vec_dot_type = GGML_TYPE_Q8_0,
+        .nrows = 1,
+        .ncols = 8,
+        .interleave_blcksize = 8,
+        .gemv = ggml_gemv_q4_0_8x8_q8_0,
+        .gemm = ggml_gemm_q4_0_8x8_q8_0,
    }
};

@ -3188,6 +3237,9 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
        case GGML_FTYPE_MOSTLY_IQ4_XS:   wtype = GGML_TYPE_IQ4_XS;   break;
        case GGML_FTYPE_MOSTLY_IQ3_S:    wtype = GGML_TYPE_IQ3_S;    break;
        case GGML_FTYPE_MOSTLY_IQ2_S:    wtype = GGML_TYPE_IQ2_S;    break;
+        case GGML_FTYPE_MOSTLY_Q4_0_4_4: wtype = GGML_TYPE_Q4_0_4_4; break;
+        case GGML_FTYPE_MOSTLY_Q4_0_4_8: wtype = GGML_TYPE_Q4_0_4_8; break;
+        case GGML_FTYPE_MOSTLY_Q4_0_8_8: wtype = GGML_TYPE_Q4_0_8_8; break;
        case GGML_FTYPE_UNKNOWN:         wtype = GGML_TYPE_COUNT;    break;
        case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
    }

@ -9432,6 +9484,9 @@ static void ggml_compute_forward_add(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
            {
                ggml_compute_forward_add_q_f32(params, dst);
            } break;

@ -9807,6 +9862,9 @@ static void ggml_compute_forward_add1(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
            {
                ggml_compute_forward_add1_q_f32(params, dst);
            } break;

@ -9932,6 +9990,9 @@ static void ggml_compute_forward_acc(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
        default:
            {
                GGML_ASSERT(false);
@ -12134,6 +12195,12 @@ static void ggml_compute_forward_mul_mat(
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
    int64_t           const vec_dot_num_rows      = type_traits[type].nrows;
+    int64_t           const matmul_num_cols       = type_traits[type].ncols;
+    int64_t           const interleave_blcksize   = type_traits[type].interleave_blcksize;
+    ggml_from_float_to_mat_t const from_float_to_mat
+        = type_traits[vec_dot_type].from_float_to_mat;
+    ggml_gemv_t       const gemv                  = type_traits[type].gemv;
+    ggml_gemm_t       const gemm                  = type_traits[type].gemm;

    GGML_ASSERT(ne0 == ne01);
    GGML_ASSERT(ne1 == ne11);

@ -12192,7 +12259,16 @@ UseGgmlGemm1:;

        for (int64_t i13 = 0; i13 < ne13; ++i13) {
            for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
+                int64_t i11_processed = 0;
+                if ((ggml_n_dims(src1) == 2) && from_float_to_mat && gemm) {
+                    for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
+                        from_float_to_mat((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
+                                          (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
+                                          4, ne10, interleave_blcksize);
+                    }
+                    i11_processed = ne11 - ne11 % 4;
+                }
+                for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
                    from_float_to_vec_dot((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
                                          (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
                                          ne10);

@ -12273,6 +12349,28 @@ UseGgmlGemm2:;
    const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
    const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;

+    if ((ggml_n_dims(src0) == 2) && gemv) {
+        const void * src1_wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
+        const size_t src1_col_stride = ggml_is_contiguous(src1) || src1->type != vec_dot_type ? ggml_row_size(vec_dot_type, ne10) : nb11;
+        int64_t src0_start = (ith * ne01) / nth;
+        int64_t src0_end = ((ith + 1) * ne01) / nth;
+        src0_start = (src0_start % matmul_num_cols) ? src0_start + matmul_num_cols - (src0_start % matmul_num_cols): src0_start;
+        src0_end = (src0_end % matmul_num_cols) ? src0_end + matmul_num_cols - (src0_end % matmul_num_cols): src0_end;
+        if (src0_start >= src0_end) return;
+
+        // If there are more than three rows in src1, use gemm; otherwise, use gemv.
+        if (gemm && (ne11 > 3)) {
+            gemm(ne00, (float *)((char *) dst->data) + src0_start, ne01, (const char *) src0->data + src0_start * nb01,
+                 (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start);
+        }
+        for (int iter = gemm ? ne11 - ne11 % 4 : 0; iter < ne11; iter++) {
+            gemv(ne00, (float *)((char *) dst->data + (iter * nb1)) + src0_start, ne01,
+                 (const char *) src0->data + src0_start * nb01, (const char *) src1_wdata + (src1_col_stride * iter), 1,
+                 src0_end - src0_start);
+        }
+        return;
+    }
+
    // The first chunk comes from our thread_id, the rest will get auto-assigned.
    int current_chunk = ith;

@ -12318,6 +12416,8 @@ static void ggml_compute_forward_mul_mat_id(
    ggml_vec_dot_t    const vec_dot               = type_traits[type].vec_dot;
    enum ggml_type    const vec_dot_type          = type_traits[type].vec_dot_type;
    ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float;
+    int64_t           const matmul_num_cols       = type_traits[type].ncols;
+    ggml_gemv_t       const gemv                  = type_traits[type].gemv;

    // we don't support permuted src0 or src1
    GGML_ASSERT(nb00 == ggml_type_size(type));

@ -12403,6 +12503,34 @@ static void ggml_compute_forward_mul_mat_id(
        const int64_t nr0 = ne01; // src0 rows
        const int64_t nr1 = cne1; // src1 rows

+        if (((ggml_n_dims(src0) - 1) == 2) && gemv) {
+            int64_t src0_cur_start = (ith * ne01) / nth;
+            int64_t src0_cur_end = ((ith + 1) * ne01) / nth;
+            src0_cur_start = (src0_cur_start % matmul_num_cols) ? src0_cur_start + matmul_num_cols - (src0_cur_start % matmul_num_cols): src0_cur_start;
+            src0_cur_end = (src0_cur_end % matmul_num_cols) ? src0_cur_end + matmul_num_cols - (src0_cur_end % matmul_num_cols): src0_cur_end;
+            if (src0_cur_start >= src0_cur_end) return;
+
+            for (int ir1 = 0; ir1 < nr1; ir1++) {
+                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1);
+                const int id = row_mapping.i1; // selected expert index
+
+                const int64_t i11 = id % ne11;
+                const int64_t i12 = row_mapping.i2; // row index in src1
+
+                const int64_t i1 = id;  // selected expert index
+                const int64_t i2 = i12; // row
+
+                const char * src1_col = (const char *) wdata +
+                    (src1_cont || src1->type != vec_dot_type
+                     ? (i11 + i12 * ne11) * row_size
+                     : (i11 * nb11 + i12 * nb12));
+
+                gemv(ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01,
+                     (const char *) src0_cur + src0_cur_start * nb01, src1_col, 1, src0_cur_end - src0_cur_start);
+            }
+            continue;
+        }
+
        // distribute the thread work across the inner or outer loop based on which one is larger

        const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
@ -12704,6 +12832,9 @@ static void ggml_compute_forward_out_prod(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
            {
                ggml_compute_forward_out_prod_q_f32(params, dst);
            } break;

@ -12889,6 +13020,9 @@ static void ggml_compute_forward_set(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
        default:
            {
                GGML_ASSERT(false);

@ -13148,6 +13282,9 @@ static void ggml_compute_forward_get_rows(
        case GGML_TYPE_IQ4_XS:
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
            {
                ggml_compute_forward_get_rows_q(params, dst);
            } break;

@ -13734,6 +13871,9 @@ static void ggml_compute_forward_clamp(
        case GGML_TYPE_IQ3_S:
        case GGML_TYPE_IQ2_S:
        case GGML_TYPE_Q8_K:
+        case GGML_TYPE_Q4_0_4_4:
+        case GGML_TYPE_Q4_0_4_8:
+        case GGML_TYPE_Q4_0_8_8:
        case GGML_TYPE_I8:
        case GGML_TYPE_I16:
        case GGML_TYPE_I32:

@ -20457,6 +20597,9 @@ size_t ggml_quantize_chunk(
        case GGML_TYPE_IQ1_M:   result = quantize_iq1_m  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ4_NL:  result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_IQ4_XS:  result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q4_0_4_4: result = quantize_q4_0_4x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q4_0_4_8: result = quantize_q4_0_4x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q4_0_8_8: result = quantize_q4_0_8x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
        case GGML_TYPE_F16:
            {
                size_t elemsize = sizeof(ggml_fp16_t);
@ -21759,8 +21902,6 @@ int ggml_cpu_has_neon(void) {
|
|||||||
|
|
||||||
int ggml_cpu_has_sve(void) {
|
int ggml_cpu_has_sve(void) {
|
||||||
#if defined(__ARM_FEATURE_SVE)
|
#if defined(__ARM_FEATURE_SVE)
|
||||||
// TODO: Currently, SVE 256 bit is only supported.
|
|
||||||
GGML_ASSERT(svcntb() == QK8_0);
|
|
||||||
return 1;
|
return 1;
|
||||||
#else
|
#else
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -79,5 +79,4 @@ python -m twine upload dist/*
|
|||||||
```
|
```
|
||||||
|
|
||||||
## TODO
|
## TODO
|
||||||
- [ ] Add tests
|
|
||||||
- [ ] Include conversion scripts as command line entry points in this package.
|
- [ ] Include conversion scripts as command line entry points in this package.
|
||||||
|
@ -67,7 +67,7 @@ class ReaderTensor(NamedTuple):
|
|||||||
|
|
||||||
class GGUFReader:
|
class GGUFReader:
|
||||||
# I - same as host, S - swapped
|
# I - same as host, S - swapped
|
||||||
byte_order: Literal['I'] | Literal['S'] = 'I'
|
byte_order: Literal['I', 'S'] = 'I'
|
||||||
alignment: int = GGUF_DEFAULT_ALIGNMENT
|
alignment: int = GGUF_DEFAULT_ALIGNMENT
|
||||||
data_offset: int
|
data_offset: int
|
||||||
|
|
||||||
@ -86,7 +86,7 @@ class GGUFReader:
|
|||||||
GGUFValueType.BOOL: np.bool_,
|
GGUFValueType.BOOL: np.bool_,
|
||||||
}
|
}
|
||||||
|
|
||||||
def __init__(self, path: os.PathLike[str] | str, mode: Literal['r'] | Literal['r+'] | Literal['c'] = 'r'):
|
def __init__(self, path: os.PathLike[str] | str, mode: Literal['r', 'r+', 'c'] = 'r'):
|
||||||
self.data = np.memmap(path, mode = mode)
|
self.data = np.memmap(path, mode = mode)
|
||||||
offs = 0
|
offs = 0
|
||||||
|
|
||||||
@ -140,7 +140,7 @@ class GGUFReader:
|
|||||||
return self.tensors[idx]
|
return self.tensors[idx]
|
||||||
|
|
||||||
def _get(
|
def _get(
|
||||||
self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I'] | Literal['S'] | Literal['<'] = None,
|
self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I', 'S', '<'] = None,
|
||||||
) -> npt.NDArray[Any]:
|
) -> npt.NDArray[Any]:
|
||||||
count = int(count)
|
count = int(count)
|
||||||
itemsize = int(np.empty([], dtype = dtype).itemsize)
|
itemsize = int(np.empty([], dtype = dtype).itemsize)
|
||||||
|
@ -6,7 +6,6 @@ from typing import Any, Callable
|
|||||||
from collections import deque
|
from collections import deque
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
from numpy._typing import _Shape
|
|
||||||
from numpy.typing import DTypeLike
|
from numpy.typing import DTypeLike
|
||||||
|
|
||||||
|
|
||||||
@ -16,16 +15,16 @@ logger = logging.getLogger(__name__)
|
|||||||
class LazyMeta(ABCMeta):
|
class LazyMeta(ABCMeta):
|
||||||
|
|
||||||
def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs):
|
def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs):
|
||||||
def __getattr__(self, __name: str) -> Any:
|
def __getattr__(self, name: str) -> Any:
|
||||||
meta_attr = getattr(self._meta, __name)
|
meta_attr = getattr(self._meta, name)
|
||||||
if callable(meta_attr):
|
if callable(meta_attr):
|
||||||
return type(self)._wrap_fn(
|
return type(self)._wrap_fn(
|
||||||
(lambda s, *args, **kwargs: getattr(s, __name)(*args, **kwargs)),
|
(lambda s, *args, **kwargs: getattr(s, name)(*args, **kwargs)),
|
||||||
use_self=self,
|
use_self=self,
|
||||||
)
|
)
|
||||||
elif isinstance(meta_attr, self._tensor_type):
|
elif isinstance(meta_attr, self._tensor_type):
|
||||||
# e.g. self.T with torch.Tensor should still be wrapped
|
# e.g. self.T with torch.Tensor should still be wrapped
|
||||||
return type(self)._wrap_fn(lambda s: getattr(s, __name))(self)
|
return type(self)._wrap_fn(lambda s: getattr(s, name))(self)
|
||||||
else:
|
else:
|
||||||
# no need to wrap non-tensor properties,
|
# no need to wrap non-tensor properties,
|
||||||
# and they likely don't depend on the actual contents of the tensor
|
# and they likely don't depend on the actual contents of the tensor
|
||||||
@ -141,19 +140,21 @@ class LazyBase(ABC, metaclass=LazyMeta):
|
|||||||
res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
|
res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
|
||||||
|
|
||||||
if isinstance(res, cls._tensor_type):
|
if isinstance(res, cls._tensor_type):
|
||||||
def collect_replace(t: LazyBase):
|
class CollectSharedLazy:
|
||||||
if collect_replace.shared_lazy is None:
|
|
||||||
collect_replace.shared_lazy = t._lazy
|
|
||||||
else:
|
|
||||||
collect_replace.shared_lazy.extend(t._lazy)
|
|
||||||
t._lazy = collect_replace.shared_lazy
|
|
||||||
|
|
||||||
# emulating a static variable
|
# emulating a static variable
|
||||||
collect_replace.shared_lazy = None
|
shared_lazy: None | deque[LazyBase] = None
|
||||||
|
|
||||||
LazyBase._recurse_apply(args, collect_replace)
|
@staticmethod
|
||||||
|
def collect_replace(t: LazyBase):
|
||||||
|
if CollectSharedLazy.shared_lazy is None:
|
||||||
|
CollectSharedLazy.shared_lazy = t._lazy
|
||||||
|
else:
|
||||||
|
CollectSharedLazy.shared_lazy.extend(t._lazy)
|
||||||
|
t._lazy = CollectSharedLazy.shared_lazy
|
||||||
|
|
||||||
shared_lazy = collect_replace.shared_lazy
|
LazyBase._recurse_apply(args, CollectSharedLazy.collect_replace)
|
||||||
|
|
||||||
|
shared_lazy = CollectSharedLazy.shared_lazy
|
||||||
|
|
||||||
return cls(meta=cls.eager_to_meta(res), lazy=shared_lazy, args=args, func=lambda a: fn(*a, **kwargs))
|
return cls(meta=cls.eager_to_meta(res), lazy=shared_lazy, args=args, func=lambda a: fn(*a, **kwargs))
|
||||||
else:
|
else:
|
||||||
@ -184,6 +185,7 @@ class LazyBase(ABC, metaclass=LazyMeta):
|
|||||||
lt._args = cls._recurse_apply(lt._args, already_eager_to_eager)
|
lt._args = cls._recurse_apply(lt._args, already_eager_to_eager)
|
||||||
lt._data = lt._func(lt._args)
|
lt._data = lt._func(lt._args)
|
||||||
# sanity check
|
# sanity check
|
||||||
|
assert lt._data is not None
|
||||||
assert lt._data.dtype == lt._meta.dtype
|
assert lt._data.dtype == lt._meta.dtype
|
||||||
assert lt._data.shape == lt._meta.shape
|
assert lt._data.shape == lt._meta.shape
|
||||||
|
|
||||||
@ -216,7 +218,7 @@ class LazyNumpyTensor(LazyBase):
|
|||||||
_tensor_type = np.ndarray
|
_tensor_type = np.ndarray
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: _Shape) -> np.ndarray[Any, Any]:
|
def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> np.ndarray[Any, Any]:
|
||||||
# The initial idea was to use np.nan as the fill value,
|
# The initial idea was to use np.nan as the fill value,
|
||||||
# but non-float types like np.int16 can't use that.
|
# but non-float types like np.int16 can't use that.
|
||||||
# So zero it is.
|
# So zero it is.
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
[tool.poetry]
|
[tool.poetry]
|
||||||
name = "gguf"
|
name = "gguf"
|
||||||
version = "0.9.0"
|
version = "0.9.1"
|
||||||
description = "Read and write ML models in GGUF for GGML"
|
description = "Read and write ML models in GGUF for GGML"
|
||||||
authors = ["GGML <ggml@ggml.ai>"]
|
authors = ["GGML <ggml@ggml.ai>"]
|
||||||
packages = [
|
packages = [
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
# pyright: reportUnusedImport=false
|
||||||
|
|
||||||
from .gguf_convert_endian import main as gguf_convert_endian_entrypoint
|
from .gguf_convert_endian import main as gguf_convert_endian_entrypoint
|
||||||
from .gguf_dump import main as gguf_dump_entrypoint
|
from .gguf_dump import main as gguf_dump_entrypoint
|
||||||
from .gguf_set_metadata import main as gguf_set_metadata_entrypoint
|
from .gguf_set_metadata import main as gguf_set_metadata_entrypoint
|
||||||
|
@ -63,9 +63,9 @@ def gguf_hash(reader: GGUFReader, filename: str, disable_progress_bar) -> None:
|
|||||||
bar.update(sum_weights_in_tensor)
|
bar.update(sum_weights_in_tensor)
|
||||||
|
|
||||||
sha1_layer = hashlib.sha1()
|
sha1_layer = hashlib.sha1()
|
||||||
sha1_layer.update(tensor.data)
|
sha1_layer.update(tensor.data.data)
|
||||||
sha1.update(tensor.data)
|
sha1.update(tensor.data.data)
|
||||||
uuidv5_sha1.update(tensor.data)
|
uuidv5_sha1.update(tensor.data.data)
|
||||||
print("sha1 {0} {1}:{2}".format(sha1_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
|
print("sha1 {0} {1}:{2}".format(sha1_layer.hexdigest(), filename, tensor.name)) # noqa: NP100
|
||||||
|
|
||||||
# Flush Hash Progress Bar
|
# Flush Hash Progress Bar
|
||||||
|
@ -1,4 +1,6 @@
|
|||||||
#!/usr/bin/env python3
|
#!/usr/bin/env python3
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
import gguf # noqa: F401
|
import gguf # noqa: F401 # pyright: ignore[reportUnusedImport]
|
||||||
|
|
||||||
# TODO: add tests
|
# TODO: add tests
|
||||||
|
|
||||||
|
@ -4,7 +4,7 @@ GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.
|
|||||||
|
|
||||||
## Background
|
## Background
|
||||||
|
|
||||||
[Bakus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.
|
[Backus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.
|
||||||
|
|
||||||
## Basics
|
## Basics
|
||||||
|
|
||||||
|
@ -162,6 +162,9 @@ extern "C" {
|
|||||||
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
|
LLAMA_FTYPE_MOSTLY_IQ4_XS = 30, // except 1d tensors
|
||||||
LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
|
LLAMA_FTYPE_MOSTLY_IQ1_M = 31, // except 1d tensors
|
||||||
LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
|
LLAMA_FTYPE_MOSTLY_BF16 = 32, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_0_4_4 = 33, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_0_4_8 = 34, // except 1d tensors
|
||||||
|
LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
|
||||||
|
|
||||||
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
|
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
|
||||||
};
|
};
|
||||||
|
@ -1,3 +1,21 @@
|
|||||||
{
|
{
|
||||||
"extraPaths": ["gguf-py"],
|
"extraPaths": ["gguf-py"],
|
||||||
}
|
"pythonVersion": "3.9",
|
||||||
|
"pythonPlatform": "All",
|
||||||
|
"reportUnusedImport": "warning",
|
||||||
|
"reportDuplicateImport": "error",
|
||||||
|
"reportDeprecated": "warning",
|
||||||
|
"reportUnnecessaryTypeIgnoreComment": "warning",
|
||||||
|
"executionEnvironments": [
|
||||||
|
{
|
||||||
|
// TODO: make this version override work correctly
|
||||||
|
"root": "gguf-py",
|
||||||
|
"pythonVersion": "3.8",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
// uses match expressions in steps.py
|
||||||
|
"root": "examples/server/tests",
|
||||||
|
"pythonVersion": "3.10",
|
||||||
|
},
|
||||||
|
],
|
||||||
|
}
|
||||||
|
12
requirements/requirements-all.txt
Normal file
12
requirements/requirements-all.txt
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
-r ../examples/llava/requirements.txt
|
||||||
|
-r ../examples/server/bench/requirements.txt
|
||||||
|
-r ../examples/server/tests/requirements.txt
|
||||||
|
|
||||||
|
-r ./requirements-compare-llama-bench.txt
|
||||||
|
-r ./requirements-pydantic.txt
|
||||||
|
-r ./requirements-test-tokenizer-random.txt
|
||||||
|
|
||||||
|
-r ./requirements-convert_hf_to_gguf.txt
|
||||||
|
-r ./requirements-convert_hf_to_gguf_update.txt
|
||||||
|
-r ./requirements-convert_legacy_llama.txt
|
||||||
|
-r ./requirements-convert_llama_ggml_to_gguf.txt
|
2
requirements/requirements-compare-llama-bench.txt
Normal file
2
requirements/requirements-compare-llama-bench.txt
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
tabulate~=0.9.0
|
||||||
|
GitPython~=3.1.43
|
2
requirements/requirements-pydantic.txt
Normal file
2
requirements/requirements-pydantic.txt
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
docstring_parser~=0.15
|
||||||
|
pydantic~=2.6.3
|
1
requirements/requirements-test-tokenizer-random.txt
Normal file
1
requirements/requirements-test-tokenizer-random.txt
Normal file
@ -0,0 +1 @@
|
|||||||
|
cffi~=1.16.0
|
@ -108,6 +108,11 @@ check_convert_script() {
|
|||||||
fatal "$py missing requirements. Expected: $reqs"
|
fatal "$py missing requirements. Expected: $reqs"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Check that all sub-requirements are added to top-level requirements.txt
|
||||||
|
if ! grep -qF "$reqs" requirements.txt; then
|
||||||
|
fatal "$reqs needs to be added to requirements.txt"
|
||||||
|
fi
|
||||||
|
|
||||||
local venv="$workdir/$pyname-venv"
|
local venv="$workdir/$pyname-venv"
|
||||||
python3 -m venv "$venv"
|
python3 -m venv "$venv"
|
||||||
|
|
||||||
@ -134,12 +139,7 @@ EOF
|
|||||||
|
|
||||||
readonly ignore_eq_eq='check_requirements: ignore "=="'
|
readonly ignore_eq_eq='check_requirements: ignore "=="'
|
||||||
|
|
||||||
for req in "$reqs_dir"/*; do
|
for req in */**/requirements*.txt; do
|
||||||
# Check that all sub-requirements are added to top-level requirements.txt
|
|
||||||
if ! grep -qF "$req" requirements.txt; then
|
|
||||||
fatal "$req needs to be added to requirements.txt"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Make sure exact release versions aren't being pinned in the requirements
|
# Make sure exact release versions aren't being pinned in the requirements
|
||||||
# Filters out the ignore string
|
# Filters out the ignore string
|
||||||
if grep -vF "$ignore_eq_eq" "$req" | grep -q '=='; then
|
if grep -vF "$ignore_eq_eq" "$req" | grep -q '=='; then
|
||||||
|
@ -123,13 +123,13 @@ builds = cursor.execute("SELECT DISTINCT build_commit FROM test;").fetchall()
|
|||||||
|
|
||||||
try:
|
try:
|
||||||
repo = git.Repo(".", search_parent_directories=True)
|
repo = git.Repo(".", search_parent_directories=True)
|
||||||
except git.exc.InvalidGitRepositoryError:
|
except git.InvalidGitRepositoryError:
|
||||||
repo = None
|
repo = None
|
||||||
|
|
||||||
|
|
||||||
def find_parent_in_data(commit):
|
def find_parent_in_data(commit: git.Commit):
|
||||||
"""Helper function to find the most recent parent measured in number of commits for which there is data."""
|
"""Helper function to find the most recent parent measured in number of commits for which there is data."""
|
||||||
heap = [(0, commit)]
|
heap: list[tuple[int, git.Commit]] = [(0, commit)]
|
||||||
seen_hexsha8 = set()
|
seen_hexsha8 = set()
|
||||||
while heap:
|
while heap:
|
||||||
depth, current_commit = heapq.heappop(heap)
|
depth, current_commit = heapq.heappop(heap)
|
||||||
@ -144,7 +144,7 @@ def find_parent_in_data(commit):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def get_all_parent_hexsha8s(commit):
|
def get_all_parent_hexsha8s(commit: git.Commit):
|
||||||
"""Helper function to recursively get hexsha8 values for all parents of a commit."""
|
"""Helper function to recursively get hexsha8 values for all parents of a commit."""
|
||||||
unvisited = [commit]
|
unvisited = [commit]
|
||||||
visited = []
|
visited = []
|
||||||
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import array
 import unicodedata
 import requests
@@ -133,7 +135,7 @@ table_nfd.sort()


 # group ranges with same flags
-ranges_flags = [(0, codepoint_flags[0])]  # start, flags
+ranges_flags: list[tuple[int, int]] = [(0, codepoint_flags[0])]  # start, flags
 for codepoint, flags in enumerate(codepoint_flags):
     if flags != ranges_flags[-1][1]:
         ranges_flags.append((codepoint, flags))
@@ -141,11 +143,11 @@ ranges_flags.append((MAX_CODEPOINTS, 0x0000))


 # group ranges with same nfd
-ranges_nfd = [(0, 0, 0)]  # start, last, nfd
+ranges_nfd: list[tuple[int, int, int]] = [(0, 0, 0)]  # start, last, nfd
 for codepoint, norm in table_nfd:
     start = ranges_nfd[-1][0]
     if ranges_nfd[-1] != (start, codepoint - 1, norm):
-        ranges_nfd.append(None)
+        ranges_nfd.append(None)  # type: ignore[arg-type] # dummy, will be replaced below
         start = codepoint
     ranges_nfd[-1] = (start, codepoint, norm)

@@ -179,13 +181,13 @@ for codepoint in table_whitespace:
 out("};\n")

 out("const std::unordered_map<uint32_t, uint32_t> unicode_map_lowercase = {")
-for tuple in table_lowercase:
-    out("{0x%06X, 0x%06X}," % tuple)
+for tuple_lw in table_lowercase:
+    out("{0x%06X, 0x%06X}," % tuple_lw)
 out("};\n")

 out("const std::unordered_map<uint32_t, uint32_t> unicode_map_uppercase = {")
-for tuple in table_uppercase:
-    out("{0x%06X, 0x%06X}," % tuple)
+for tuple_up in table_uppercase:
+    out("{0x%06X, 0x%06X}," % tuple_up)
 out("};\n")

 out("const std::vector<range_nfd> unicode_ranges_nfd = { // start, last, nfd")
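Note: the ranges_* tables built above (presumably in scripts/gen-unicode-data.py) are a simple run-length encoding: a new (start, value) entry is appended only when the value changes, and a sentinel terminates the list, so the consumer can recover the value for any codepoint with a binary search over the run starts. A minimal C++ sketch of that lookup (illustrative only; the struct and function names here are assumptions, not the actual unicode.cpp code):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // one entry per run of codepoints that share the same flags value
    struct range_start {
        uint32_t first; // first codepoint of the run
        uint16_t flags; // flags shared by the whole run
    };

    // `ranges` is assumed sorted by .first and terminated by a (MAX_CODEPOINTS, 0) sentinel,
    // like the generated unicode_ranges_flags table
    static uint16_t lookup_flags(const std::vector<range_start> & ranges, uint32_t cp) {
        // find the last entry whose start is <= cp, then that run's flags apply
        auto it = std::upper_bound(ranges.begin(), ranges.end(), cp,
            [](uint32_t v, const range_start & r) { return v < r.first; });
        return it == ranges.begin() ? uint16_t(0) : (it - 1)->flags;
    }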
@@ -63,6 +63,7 @@ while read c; do
     src/ggml*.metal \
     src/ggml*.cu \
     src/ggml-cuda/* \
+    src/ggml-sycl/* \
     include/ggml*.h \
     tests/test-opt.cpp \
     tests/test-grad0.cpp \
@@ -113,6 +114,7 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then
     # src/ggml-quants.c -> ggml/src/ggml-quants.c
     # src/ggml-quants.h -> ggml/src/ggml-quants.h
     # src/ggml-rpc.cpp -> ggml/src/ggml-rpc.cpp
+    # src/ggml-sycl/* -> ggml/src/ggml-sycl/
     # src/ggml-sycl.cpp -> ggml/src/ggml-sycl.cpp
     # src/ggml-vulkan.cpp -> ggml/src/ggml-vulkan.cpp
     #
@@ -153,6 +155,7 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then
     -e 's/([[:space:]]|[ab]\/)src\/ggml-quants\.c/\1ggml\/src\/ggml-quants.c/g' \
     -e 's/([[:space:]]|[ab]\/)src\/ggml-quants\.h/\1ggml\/src\/ggml-quants.h/g' \
     -e 's/([[:space:]]|[ab]\/)src\/ggml-rpc\.cpp/\1ggml\/src\/ggml-rpc.cpp/g' \
+    -e 's/([[:space:]]|[ab]\/)src\/ggml-sycl\//\1ggml\/src\/ggml-sycl\//g' \
     -e 's/([[:space:]]|[ab]\/)src\/ggml-sycl\.cpp/\1ggml\/src\/ggml-sycl.cpp/g' \
     -e 's/([[:space:]]|[ab]\/)src\/ggml-vulkan\.cpp/\1ggml\/src\/ggml-vulkan.cpp/g' \
     -e 's/([[:space:]]|[ab]\/)include\/ggml\.h/\1ggml\/include\/ggml.h/g' \
@@ -1 +1 @@
-5378ea0d3c2f25bcd330ecb226ad2db454be86d0
+e3b3846976c94163f2b3dd128cc959782653edbb
@@ -18,6 +18,7 @@ cp -rpv ../ggml/src/ggml-metal.metal ./ggml/src/ggml-metal.metal
 cp -rpv ../ggml/src/ggml-quants.c ./ggml/src/ggml-quants.c
 cp -rpv ../ggml/src/ggml-quants.h ./ggml/src/ggml-quants.h
 cp -rpv ../ggml/src/ggml-rpc.cpp ./ggml/src/ggml-rpc.cpp
+cp -rpv ../ggml/src/ggml-sycl/* ./ggml/src/ggml-sycl/
 cp -rpv ../ggml/src/ggml-sycl.cpp ./ggml/src/ggml-sycl.cpp
 cp -rpv ../ggml/src/ggml-vulkan.cpp ./ggml/src/ggml-vulkan.cpp

@@ -57,6 +57,12 @@
 #include <io.h>
 #endif

+#if __cplusplus >= 202000L
+    #define LU8(x) (const char*)(u8##x)
+#else
+    #define LU8(x) u8##x
+#endif
+
 #include <algorithm>
 #include <array>
 #include <cassert>
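Note: the LU8 macro added above works around a C++20 change: u8"..." literals have type const char8_t[] from C++20 on, so they no longer convert implicitly to const char * or std::string. A minimal standalone illustration (a sketch, not code taken from llama.cpp; it uses the official 202002L feature-test value where the patch checks 202000L):

    #include <iostream>
    #include <string>

    #if __cplusplus >= 202002L
        #define LU8(x) (const char*)(u8##x)  // C++20: u8"" is const char8_t[], cast back to const char*
    #else
        #define LU8(x) u8##x                 // pre-C++20: u8"" is already const char[]
    #endif

    int main() {
        std::string prompt = LU8("<用户>");  // compiles the same way under C++17 and C++20
        std::cout << prompt << "\n";
        return 0;
    }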
@@ -3836,6 +3842,9 @@ struct llama_model_loader {
             case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break;
             case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break;
             case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
+            case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break;
+            case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break;
+            case GGML_TYPE_Q4_0_8_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_8_8; break;
             default:
                 {
                     LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
@@ -4529,6 +4538,9 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
+        case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
+        case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
+        case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8";

         default: return "unknown, may not work";
     }
@@ -13282,6 +13294,8 @@ struct llm_build_context {
                 LLM_NORM_RMS, cb, -1);
             cb(cur, "result_norm", -1);
         } else {
+            GGML_ASSERT(n_outputs_enc > 0 && "call llama_encode() first");
+
             struct ggml_tensor * embd_enc = llm_build_inp_embd_enc();
             struct ggml_tensor * pos_bucket_dec = llm_build_pos_bucket(true);

@@ -17842,6 +17856,10 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
         else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
             new_type = GGML_TYPE_IQ3_S;
         }
+        else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 ||
+                 new_type == GGML_TYPE_Q4_0_8_8) {
+            new_type = GGML_TYPE_Q4_0;
+        }
     }
 } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
            ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
@@ -18154,6 +18172,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         case LLAMA_FTYPE_MOSTLY_IQ4_XS: default_type = GGML_TYPE_IQ4_XS; break;
         case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break;
         case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
+        case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break;

         default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
     }
@@ -18464,6 +18485,14 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         f32_data = (float *) f32_conv_buf.data();
     }

+    int chunk_size_multiplier = 1;
+    if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
+        if ((new_type == GGML_TYPE_Q4_0_8_8) && (tensor->ne[1] % 8 != 0)) new_type = GGML_TYPE_Q4_0;
+        else if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_Q4_0;
+        if (new_type == GGML_TYPE_Q4_0_8_8) chunk_size_multiplier = 8;
+        else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8) chunk_size_multiplier = 4;
+    }
+
     LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
     fflush(stdout);

@@ -18476,7 +18505,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     const int64_t nrows = tensor->ne[1];

     static const int64_t min_chunk_size = 32 * 512;
-    const int64_t chunk_size = n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row);
+    const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)) *
+                               chunk_size_multiplier;

     const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
     const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
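Note: the Q4_0_4_4 / Q4_0_4_8 / Q4_0_8_8 repacked types interleave 4 or 8 rows per block, which is presumably why the quantization chunk is scaled by chunk_size_multiplier and why tensors whose row count is not a multiple of the interleave factor fall back to plain Q4_0. A worked example of the rounding only (standalone sketch; the numbers are illustrative, not taken from a particular model):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int64_t n_per_row             = 4096;      // e.g. a 4096-wide weight row
        const int64_t min_chunk_size        = 32 * 512;  // 16384, as in the quantizer
        const int64_t chunk_size_multiplier = 4;         // Q4_0_4_4 / Q4_0_4_8 interleave 4 rows

        // round n_per_row up to a whole number of rows covering at least min_chunk_size ...
        const int64_t base = n_per_row >= min_chunk_size
            ? n_per_row
            : n_per_row * ((min_chunk_size + n_per_row - 1) / n_per_row);
        // ... then scale by the interleave factor
        const int64_t chunk_size = base * chunk_size_multiplier;

        printf("base = %lld, chunk_size = %lld\n", (long long) base, (long long) chunk_size); // 16384, 65536
        return 0;
    }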
@@ -21508,12 +21538,12 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "<|assistant|>";
         }
-    } else if (tmpl == "minicpm" || tmpl_contains(u8"<用户>")) {
+    } else if (tmpl == "minicpm" || tmpl_contains(LU8("<用户>"))) {
         // MiniCPM-3B-OpenHermes-2.5-v2-GGUF
         for (auto message : chat) {
             std::string role(message->role);
             if (role == "user") {
-                ss << u8"<用户>";
+                ss << LU8("<用户>");
                 ss << trim(message->content);
                 ss << "<AI>";
             } else {
@@ -21529,7 +21559,7 @@ static int32_t llama_chat_apply_template_internal(
             } else if (role == "user") {
                 ss << "User: " << message->content << "\n\n";
             } else if (role == "assistant") {
-                ss << "Assistant: " << message->content << u8"<|end▁of▁sentence|>";
+                ss << "Assistant: " << message->content << LU8("<|end▁of▁sentence|>");
             }
         }
         if (add_ass) {
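Note: both template branches touched here sit behind the public llama_chat_apply_template() API. A hedged usage sketch (the signature and the nullptr-model behaviour are assumed from llama.h of this period; buffer sizing and error handling are kept minimal):

    #include <cstdio>
    #include <vector>
    #include "llama.h"

    int main() {
        // two-turn conversation; with tmpl = "minicpm" the user turn is wrapped as <用户>...<AI>
        std::vector<llama_chat_message> chat = {
            { "user",      "Hello"               },
            { "assistant", "Hi, how can I help?" },
        };

        std::vector<char> buf(4096);
        // passing model = nullptr applies the named template instead of the model's built-in one
        const int32_t n = llama_chat_apply_template(nullptr, "minicpm", chat.data(), chat.size(),
                                                    /*add_ass=*/true, buf.data(), (int32_t) buf.size());
        if (n > 0) {
            printf("%.*s\n", n, buf.data());
        }
        return 0;
    }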
@@ -1,3 +1,7 @@
+#if defined(_MSC_VER)
+#define _SILENCE_CXX17_CODECVT_HEADER_DEPRECATION_WARNING
+#endif
+
 #include "unicode.h"
 #include "unicode-data.h"

@@ -1266,6 +1266,32 @@ struct test_pool2d : public test_case {
     }
 };

+// GGML_OP_CONV_TRANSPOSE_1D
+struct test_conv_transpose_1d : public test_case {
+    const std::array<int64_t, 4> ne_input;
+    const std::array<int64_t, 4> ne_kernel;
+
+    const int s0; // stride
+    const int p0; // padding
+    const int d0; // dilation
+
+    std::string vars() override {
+        return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0);
+    }
+
+    test_conv_transpose_1d(std::array<int64_t, 4> ne_input = {197, 32, 1, 1}, // [input_width, input_height, input_channels, 1]
+                           std::array<int64_t, 4> ne_kernel = {16, 32, 32, 1}, // [kernel_width, kernel_height, input_channels, 1]
+                           int s0 = 1, int p0 = 0, int d0 = 1)
+        : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {}
+
+    ggml_tensor * build_graph(ggml_context * ctx) override {
+        ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
+        ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
+        ggml_tensor * out = ggml_conv_transpose_1d(ctx, kernel, input, s0, p0, d0);
+        return out;
+    }
+};
+
 // GGML_OP_IM2COL
 struct test_im2col : public test_case {
     const ggml_type type_input;
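Note: for sanity-checking the shapes exercised by this new test, the usual transposed-convolution output width is out_w = (in_w - 1)*s0 - 2*p0 + d0*(k_w - 1) + 1; the cases added further down only use p0 = 0 and d0 = 1. A small sketch under that assumption (not code from the test file):

    #include <cstdint>

    // standard transposed-convolution output width
    constexpr int64_t conv_transpose_1d_out_w(int64_t in_w, int64_t k_w, int s0, int p0, int d0) {
        return (in_w - 1) * s0 - 2 * p0 + d0 * (k_w - 1) + 1;
    }

    // e.g. the default test: input width 197, kernel width 16, stride 1 -> width 212
    static_assert(conv_transpose_1d_out_w(197, 16, 1, 0, 1) == 212, "");
    // and the small case with input width 3, kernel width 2, stride 3 -> width 8
    static_assert(conv_transpose_1d_out_w(3, 2, 3, 0, 1) == 8, "");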
@@ -1279,7 +1305,7 @@ struct test_im2col : public test_case {
     // padding
     const int p0;
     const int p1;
-    // dilatation
+    // dilation
     const int d0;
     const int d1;
     // mode
@@ -2098,6 +2124,16 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
     test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
     test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));

+    test_cases.emplace_back(new test_conv_transpose_1d());
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1));
+    test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1));
+
+
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
@@ -6,6 +6,8 @@
 # python3 tests/test-tokenizer-random.py ./models/ggml-vocab-llama-bpe.gguf ./models/tokenizers/llama-bpe
 #

+from __future__ import annotations
+
 import time
 import logging
 import argparse
@@ -13,7 +15,9 @@ import subprocess
 import random
 import unicodedata

-from typing import Iterator
+from pathlib import Path
+from typing import Any, Iterator, cast
+from typing_extensions import Buffer

 import cffi
 from transformers import AutoTokenizer
@@ -28,15 +32,15 @@ class LibLlama:
     DEFAULT_PATH_INCLUDES = ["./ggml/include/", "./include/"]
     DEFAULT_PATH_LIBLLAMA = "./build/src/libllama.so"  # CMakeLists.txt: BUILD_SHARED_LIBS ON

-    def __init__(self, path_llama_h: str = None, path_includes: list[str] = [], path_libllama: str = None):
+    def __init__(self, path_llama_h: str | None = None, path_includes: list[str] = [], path_libllama: str | None = None):
         path_llama_h = path_llama_h or self.DEFAULT_PATH_LLAMA_H
         path_includes = path_includes or self.DEFAULT_PATH_INCLUDES
         path_libllama = path_libllama or self.DEFAULT_PATH_LIBLLAMA
         (self.ffi, self.lib) = self._load_libllama_cffi(path_llama_h, path_includes, path_libllama)
         self.lib.llama_backend_init()

-    def _load_libllama_cffi(self, path_llama_h: str, path_includes: list[str], path_libllama: str):
-        cmd = ["gcc", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)="]
+    def _load_libllama_cffi(self, path_llama_h: str, path_includes: list[str], path_libllama: str) -> tuple[cffi.FFI, Any]:
+        cmd = ["gcc", "-O0", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)="]
         cmd += ["-I" + path for path in path_includes] + [path_llama_h]
         res = subprocess.run(cmd, stdout=subprocess.PIPE)
         assert (res.returncode == 0)
@@ -68,7 +72,7 @@ class LibLlama:
 class LibLlamaModel:

     def __init__(self, libllama: LibLlama, path_model: str, mparams={}, cparams={}):
-        self.lib = libllama.lib
+        self.lib: Any = libllama.lib
         self.ffi = libllama.ffi
         if isinstance(mparams, dict):
             mparams = libllama.model_default_params(**mparams)
@@ -94,11 +98,11 @@ class LibLlamaModel:
         self.lib = None

     def tokenize(self, text: str, add_special: bool = False, parse_special: bool = False) -> list[int]:
-        text = text.encode("utf-8")
-        num = self.lib.llama_tokenize(self.model, text, len(text), self.token_ids, len(self.token_ids), add_special, parse_special)
+        encoded_text: bytes = text.encode("utf-8")
+        num = self.lib.llama_tokenize(self.model, encoded_text, len(encoded_text), self.token_ids, len(self.token_ids), add_special, parse_special)
         while num < 0 and len(self.token_ids) < (16 << 20):
             self.token_ids = self.ffi.new("llama_token[]", -2 * num)
-            num = self.lib.llama_tokenize(self.model, text, len(text), self.token_ids, len(self.token_ids), add_special, parse_special)
+            num = self.lib.llama_tokenize(self.model, encoded_text, len(encoded_text), self.token_ids, len(self.token_ids), add_special, parse_special)
         return list(self.token_ids[0:num])

     def detokenize(self, ids: list[int], remove_special: bool = False, unparse_special: bool = False) -> str:
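Note: the grow-and-retry loops in tokenize()/detokenize() follow the llama.cpp C API convention that these calls return the element count on success and a negative number when the caller's buffer is too small. The same pattern written directly against the C API (a sketch assuming the llama_tokenize() signature of this period; the -2 * n factor mirrors the safety margin used above):

    #include <string>
    #include <vector>
    #include "llama.h"

    // tokenize `text`, growing the output buffer when llama_tokenize reports it is too small
    static std::vector<llama_token> tokenize(const llama_model * model, const std::string & text,
                                             bool add_special, bool parse_special) {
        std::vector<llama_token> tokens(1024);
        int32_t n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
                                   tokens.data(), (int32_t) tokens.size(), add_special, parse_special);
        if (n < 0) {
            tokens.resize(-2 * n); // negative return: buffer too small, grow and retry once
            n = llama_tokenize(model, text.c_str(), (int32_t) text.size(),
                               tokens.data(), (int32_t) tokens.size(), add_special, parse_special);
        }
        tokens.resize(n > 0 ? n : 0);
        return tokens;
    }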
@@ -110,7 +114,7 @@ class LibLlamaModel:
         while num < 0 and len(self.text_buff) < (16 << 20):
             self.text_buff = self.ffi.new("uint8_t[]", -2 * num)
             num = self.lib.llama_detokenize(self.model, self.token_ids, len(ids), self.text_buff, len(self.text_buff), remove_special, unparse_special)
-        return str(self.ffi.buffer(self.text_buff, num), encoding="utf-8", errors="replace")  # replace errors with '\uFFFD'
+        return str(cast(Buffer, self.ffi.buffer(self.text_buff, num)), encoding="utf-8", errors="replace")  # replace errors with '\uFFFD'


 class Tokenizer:
@@ -152,7 +156,7 @@ class TokenizerGroundtruth (Tokenizer):

 class TokenizerLlamaCpp (Tokenizer):

-    libllama: LibLlama = None
+    libllama: LibLlama | None = None

     def __init__(self, vocab_file: str):
         if not self.libllama:
@@ -404,7 +408,7 @@ def generator_random_vocab_words(tokenizer: TokenizerGroundtruth, iterations=100

 def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLlamaCpp, generator: Iterator[str]):

-    def find_first_mismatch(ids1: list[int], ids2: list[int]):
+    def find_first_mismatch(ids1: list[int] | str, ids2: list[int] | str):
         for i, (a, b) in enumerate(zip(ids1, ids2)):
             if a != b:
                 return i
@@ -433,7 +437,7 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
     decode_errors = 0
     MAX_ERRORS = 10

-    logger.info("%s: %s" % (generator.__name__, "ini"))
+    logger.info("%s: %s" % (generator.__qualname__, "ini"))
     for text in generator:
         # print(repr(text), text.encode())
         # print(repr(text), hex(ord(text[0])), text.encode())
@@ -472,13 +476,13 @@ def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLl
             break

     t_total = time.perf_counter() - t_start
-    logger.info(f"{generator.__name__}: end, {t_encode1=:.3f} {t_encode2=:.3f} {t_decode1=:.3f} {t_decode2=:.3f} {t_total=:.3f}")
+    logger.info(f"{generator.__qualname__}: end, {t_encode1=:.3f} {t_encode2=:.3f} {t_decode1=:.3f} {t_decode2=:.3f} {t_total=:.3f}")


-def main(argv: list[str] = None):
+def main(argv: list[str] | None = None):
     parser = argparse.ArgumentParser()
-    parser.add_argument("vocab_file", help="path to vocab 'gguf' file")
-    parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
+    parser.add_argument("vocab_file", type=str, help="path to vocab 'gguf' file")
+    parser.add_argument("dir_tokenizer", type=str, help="directory containing 'tokenizer.model' file")
     parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
     args = parser.parse_args(argv)

@@ -520,7 +524,7 @@ if __name__ == "__main__":
         format = "%(levelname)s %(message)s",
     )

-    path_tokenizers = "./models/tokenizers/"
+    path_tokenizers = Path("./models/tokenizers/")
     path_vocab_format = "./models/ggml-vocab-%s.gguf"

     tokenizers = [
@@ -556,6 +560,6 @@ if __name__ == "__main__":
     for tokenizer in tokenizers:
         logger.info("-" * 50)
         logger.info(f"TOKENIZER: '{tokenizer}'")
-        vocab_file = path_vocab_format % tokenizer
-        dir_tokenizer = path_tokenizers + "/" + tokenizer
-        main([vocab_file, dir_tokenizer, "--verbose"])
+        vocab_file = Path(path_vocab_format % tokenizer)
+        dir_tokenizer = path_tokenizers / tokenizer
+        main([str(vocab_file), str(dir_tokenizer), "--verbose"])