Mirror of https://github.com/ggerganov/llama.cpp.git

Commit 977629a34e: Merge branch 'master' into fix-eos
.devops/lamma-cpp-clblast.srpm.spec (new file, 58 lines)
@@ -0,0 +1,58 @@
# SRPM for building from source and packaging an RPM for RPM-based distros.
# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
# Built and maintained by John Boero - boeroboy@gmail.com
# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal

# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
#    We need to declare standard versioning if people want to sort latest releases.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc. installed.
#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
# 4. OpenCL/CLBlast support simply requires the ICD loader and basic OpenCL libraries.
#    It is up to the user to install the correct vendor-specific support.

Name:          llama.cpp-clblast
Version:       master
Release:       1%{?dist}
Summary:       OpenCL Inference of LLaMA model in pure C/C++
License:       MIT
Source0:       https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
BuildRequires: coreutils make gcc-c++ git mesa-libOpenCL-devel
URL:           https://github.com/ggerganov/llama.cpp

%define debug_package %{nil}
%define source_date_epoch_from_changelog 0

%description
OpenCL (CLBlast) inference for Meta's LLaMA 2 models using default options.

%prep
%setup -n llama.cpp-master

%build
make -j LLAMA_CLBLAST=1

%install
mkdir -p %{buildroot}%{_bindir}/
cp -p main %{buildroot}%{_bindir}/llamacppclblast
cp -p server %{buildroot}%{_bindir}/llamacppclblastserver
cp -p simple %{buildroot}%{_bindir}/llamacppclblastsimple

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
%{_bindir}/llamacppclblast
%{_bindir}/llamacppclblastserver
%{_bindir}/llamacppclblastsimple

%pre

%post

%preun
%postun

%changelog
.devops/lamma-cpp-cublas.srpm.spec (new file, 59 lines)
@@ -0,0 +1,59 @@
# SRPM for building from source and packaging an RPM for RPM-based distros.
# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
# Built and maintained by John Boero - boeroboy@gmail.com
# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal

# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
#    We need to declare standard versioning if people want to sort latest releases.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc. installed.
#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
# 4. OpenCL/CLBlast support simply requires the ICD loader and basic OpenCL libraries.
#    It is up to the user to install the correct vendor-specific support.

Name:          llama.cpp-cublas
Version:       master
Release:       1%{?dist}
Summary:       CUDA Inference of LLaMA model in pure C/C++
License:       MIT
Source0:       https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
BuildRequires: coreutils make gcc-c++ git cuda-toolkit
Requires:      cuda-toolkit
URL:           https://github.com/ggerganov/llama.cpp

%define debug_package %{nil}
%define source_date_epoch_from_changelog 0

%description
CUDA (cuBLAS) inference for Meta's LLaMA 2 models using default options.

%prep
%setup -n llama.cpp-master

%build
make -j LLAMA_CUBLAS=1

%install
mkdir -p %{buildroot}%{_bindir}/
cp -p main %{buildroot}%{_bindir}/llamacppcublas
cp -p server %{buildroot}%{_bindir}/llamacppcublasserver
cp -p simple %{buildroot}%{_bindir}/llamacppcublassimple

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
%{_bindir}/llamacppcublas
%{_bindir}/llamacppcublasserver
%{_bindir}/llamacppcublassimple

%pre

%post

%preun
%postun

%changelog
.devops/llama-cpp.srpm.spec (new file, 58 lines)
@@ -0,0 +1,58 @@
# SRPM for building from source and packaging an RPM for RPM-based distros.
# https://fedoraproject.org/wiki/How_to_create_an_RPM_package
# Built and maintained by John Boero - boeroboy@gmail.com
# In honor of Seth Vidal https://www.redhat.com/it/blog/thank-you-seth-vidal

# Notes for llama.cpp:
# 1. Tags are currently based on hash - which will not sort asciibetically.
#    We need to declare standard versioning if people want to sort latest releases.
# 2. Builds for CUDA/OpenCL support are separate, with different dependencies.
# 3. NVidia's developer repo must be enabled with nvcc, cublas, clblas, etc. installed.
#    Example: https://developer.download.nvidia.com/compute/cuda/repos/fedora37/x86_64/cuda-fedora37.repo
# 4. OpenCL/CLBlast support simply requires the ICD loader and basic OpenCL libraries.
#    It is up to the user to install the correct vendor-specific support.

Name:          llama.cpp
Version:       master
Release:       1%{?dist}
Summary:       CPU Inference of LLaMA model in pure C/C++ (no CUDA/OpenCL)
License:       MIT
Source0:       https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
BuildRequires: coreutils make gcc-c++ git
URL:           https://github.com/ggerganov/llama.cpp

%define debug_package %{nil}
%define source_date_epoch_from_changelog 0

%description
CPU inference for Meta's LLaMA 2 models using default options.

%prep
%autosetup

%build
make -j

%install
mkdir -p %{buildroot}%{_bindir}/
cp -p main %{buildroot}%{_bindir}/llamacpp
cp -p server %{buildroot}%{_bindir}/llamacppserver
cp -p simple %{buildroot}%{_bindir}/llamacppsimple

%clean
rm -rf %{buildroot}
rm -rf %{_builddir}/*

%files
%{_bindir}/llamacpp
%{_bindir}/llamacppserver
%{_bindir}/llamacppsimple

%pre

%post

%preun
%postun

%changelog
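These spec files follow the standard Fedora SRPM layout, so they can be exercised locally with the usual rpmbuild workflow. A minimal sketch, assuming `rpmdevtools` is installed and using the CPU-only spec; the download step and directory layout are assumptions, not part of this commit:

```bash
# Sketch: build the CPU-only RPM from the new spec file.
rpmdev-setuptree                                   # creates the ~/rpmbuild tree
curl -L -o ~/rpmbuild/SOURCES/master.tar.gz \
    https://github.com/ggerganov/llama.cpp/archive/refs/heads/master.tar.gz
cp .devops/llama-cpp.srpm.spec ~/rpmbuild/SPECS/
rpmbuild -ba ~/rpmbuild/SPECS/llama-cpp.srpm.spec  # RPMs land under ~/rpmbuild/RPMS/
```

The CLBlast and cuBLAS variants build the same way once their extra BuildRequires (mesa-libOpenCL-devel, cuda-toolkit) are available.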
.gitignore (7 lines changed)
@@ -1,7 +1,10 @@
 *.o
 *.a
 *.so
+*.gguf
 *.bin
+*.exe
+*.dll
 .DS_Store
 .build/
 .cache/
@@ -47,6 +50,8 @@ models-mnt
 /server
 /Pipfile
 /embd-input-test
+/gguf
+/gguf-llama-simple
 /libllama.so
 /llama-bench
 build-info.h
@@ -65,7 +70,6 @@ perf-*.txt

 examples/jeopardy/results.txt

-
 pyproject.toml
 poetry.lock
 poetry.toml
@@ -79,4 +83,3 @@ tests/test-quantize-fns
 tests/test-quantize-perf
 tests/test-sampling
 tests/test-tokenizer-0
-
CMakeLists.txt

@@ -497,9 +497,11 @@ else()
 endif()

 #
-# Build libraries
+# libraries
 #

+# ggml
+
 add_library(ggml OBJECT
     ggml.c
     ggml.h
@@ -524,10 +526,11 @@ if (BUILD_SHARED_LIBS)
     install(TARGETS ggml_shared LIBRARY)
 endif()

+# llama
+
 add_library(llama
     llama.cpp
     llama.h
-    llama-util.h
     )

 target_include_directories(llama PUBLIC .)
@@ -546,6 +549,10 @@ if (BUILD_SHARED_LIBS)
     install(TARGETS llama LIBRARY)
 endif()

+#
+# install
+#
+
 include(GNUInstallDirs)
 install(
     FILES convert.py
@@ -584,6 +591,8 @@ endif()
 # programs, examples and tests
 #

+add_subdirectory(common)
+
 if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
     include(CTest)
     add_subdirectory(tests)
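The new `common` subdirectory and the install rules are picked up by the regular out-of-tree CMake build; a brief sketch of that standard workflow (the GPU toggles in the comment are optional):

```bash
# Standard out-of-tree CMake build; add -DLLAMA_CUBLAS=ON or -DLLAMA_CLBLAST=ON
# to configure the GPU-accelerated variants.
mkdir -p build
cd build
cmake ..
cmake --build . --config Release
```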
Makefile (21 lines changed)
@@ -1,5 +1,5 @@
 # Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test llama-bench
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf llama-bench

 # Binaries only useful for tests
 TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0
@@ -46,7 +46,7 @@ else
 OPT = -O3
 endif
 CFLAGS = -I. $(OPT) -std=c11 -fPIC
-CXXFLAGS = -I. -I./examples $(OPT) -std=c++11 -fPIC
+CXXFLAGS = -I. -I./common $(OPT) -std=c++11 -fPIC
 LDFLAGS =

 ifdef LLAMA_DEBUG
@@ -329,23 +329,23 @@ ggml-alloc.o: ggml-alloc.c ggml.h ggml-alloc.h

 OBJS += ggml-alloc.o

-llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h llama-util.h
+llama.o: llama.cpp ggml.h ggml-alloc.h ggml-cuda.h ggml-metal.h llama.h
     $(CXX) $(CXXFLAGS) -c $< -o $@

-common.o: examples/common.cpp examples/common.h
+common.o: common/common.cpp common/common.h
     $(CXX) $(CXXFLAGS) -c $< -o $@

-console.o: examples/console.cpp examples/console.h
+console.o: common/console.cpp common/console.h
     $(CXX) $(CXXFLAGS) -c $< -o $@

-grammar-parser.o: examples/grammar-parser.cpp examples/grammar-parser.h
+grammar-parser.o: common/grammar-parser.cpp common/grammar-parser.h
     $(CXX) $(CXXFLAGS) -c $< -o $@

 libllama.so: llama.o ggml.o $(OBJS)
     $(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)

 clean:
-    rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test llama-bench build-info.h $(TEST_TARGETS)
+    rm -vf *.o *.so *.dll main quantize quantize-stats perplexity embedding benchmark-matmult save-load-state server simple vdot train-text-from-scratch convert-llama2c-to-ggml embd-input-test gguf llama-bench build-info.h $(TEST_TARGETS)

 #
 # Examples
@@ -385,7 +385,10 @@ $(LIB_PRE)embdinput$(DSO_EXT): examples/embd-input/embd-input.h examples/embd-in
 embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-test.cpp build-info.h ggml.o llama.o common.o $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %$(DSO_EXT),$(filter-out %.h,$(filter-out %.hpp,$^))) -o $@ $(LDFLAGS) -L. -lembdinput

-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
+gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
+    $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
+train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o common.o $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

 convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp build-info.h ggml.o llama.o $(OBJS)
@@ -418,7 +421,7 @@ vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
 tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

-tests/test-grammar-parser: tests/test-grammar-parser.cpp examples/grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+tests/test-grammar-parser: tests/test-grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS)
     $(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

 tests/test-double-float: tests/test-double-float.cpp build-info.h ggml.o llama.o common.o $(OBJS)
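Since `gguf` is now part of `BUILD_TARGETS`, a plain `make` builds it along with everything else, or it can be built on its own:

```bash
# Build all default targets, or just the new gguf example binary.
make -j
make gguf
```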
README.md (46 lines changed)
@@ -9,11 +9,17 @@

 Inference of [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++

-### 🚧 Incoming breaking change + refactoring:
+### Hot topics

-See PR https://github.com/ggerganov/llama.cpp/pull/2398 for more info.
+A new file format has been introduced: [GGUF](https://github.com/ggerganov/llama.cpp/pull/2398)

-To devs: avoid making big changes to `llama.h` / `llama.cpp` until merged
+Last revision compatible with the old format: [dadbed9](https://github.com/ggerganov/llama.cpp/commit/dadbed99e65252d79f81101a392d0d6497b86caa)
+
+### Current `master` should be considered in Beta - expect some issues for a few days!
+
+### Be prepared to re-convert and / or re-quantize your GGUF models while this notice is up!
+
+### Issues with non-GGUF models will be considered with low priority!

 ----

@@ -33,6 +39,7 @@ To devs: avoid making big changes to `llama.h` / `llama.cpp` until merged
 <li><a href="#memorydisk-requirements">Memory/Disk Requirements</a></li>
 <li><a href="#quantization">Quantization</a></li>
 <li><a href="#interactive-mode">Interactive mode</a></li>
+<li><a href="#constrained-output-with-grammars">Constrained output with grammars</a></li>
 <li><a href="#instruction-mode-with-alpaca">Instruction mode with Alpaca</a></li>
 <li><a href="#using-openllama">Using OpenLLaMA</a></li>
 <li><a href="#using-gpt4all">Using GPT4All</a></li>
@@ -291,7 +298,7 @@ When built with Metal support, you can enable GPU inference with the `--gpu-laye
 Any value larger than 0 will offload the computation to the GPU. For example:

 ```bash
-./main -m ./models/7B/ggml-model-q4_0.bin -n 128 -ngl 1
+./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 1
 ```

 ### MPI Build
@@ -330,7 +337,7 @@ The above will distribute the computation across 2 processes on the first host a
 Finally, you're ready to run a computation using `mpirun`:

 ```bash
-mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.bin -n 128
+mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128
 ```

 ### BLAS Build
@@ -513,10 +520,10 @@ python3 convert.py models/7B/
 python convert.py models/7B/ --vocabtype bpe

 # quantize the model to 4-bits (using q4_0 method)
-./quantize ./models/7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin q4_0
+./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0

 # run the inference
-./main -m ./models/7B/ggml-model-q4_0.bin -n 128
+./main -m ./models/7B/ggml-model-q4_0.gguf -n 128
 ```

 When running the larger models, make sure you have enough disk space to store all the intermediate files.
@@ -572,7 +579,7 @@ Here is an example of a few-shot interaction, invoked with the command
 ./examples/chat-13B.sh

 # custom arguments using a 13B model
-./main -m ./models/13B/ggml-model-q4_0.bin -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
+./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
 ```

 Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `main` example program.
@@ -598,6 +605,16 @@ PROMPT_TEMPLATE=./prompts/chat-with-bob.txt PROMPT_CACHE_FILE=bob.prompt.bin \
     CHAT_SAVE_DIR=./chat/bob ./examples/chat-persistent.sh
 ```

+### Constrained output with grammars
+
+`llama.cpp` supports grammars to constrain model output. For example, you can force the model to output JSON only:
+
+```bash
+./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --grammar-file grammars/json.gbnf -p 'Request: schedule a call at 8pm; Command:'
+```
+
+The `grammars/` folder contains a handful of sample grammars. To write your own, check out the [GBNF Guide](./grammars/README.md).
+
 ### Instruction mode with Alpaca

 1. First, download the `ggml` Alpaca model into the `./models` folder
@@ -635,6 +652,8 @@ OpenLLaMA is an openly licensed reproduction of Meta's original LLaMA model. It

 ### Using [GPT4All](https://github.com/nomic-ai/gpt4all)

+*Note: these instructions are likely obsoleted by the GGUF update*
+
 - Obtain the `tokenizer.model` file from LLaMA model and put it to `models`
 - Obtain the `added_tokens.json` file from Alpaca model and put it to `models`
 - Obtain the `gpt4all-lora-quantized.bin` file from GPT4All model and put it to `models/gpt4all-7B`
@@ -710,7 +729,7 @@ If your issue is with model generation quality, then please at least scan the fo
 #### How to run

 1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
-2. Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+2. Run `./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw`
 3. Output:
 ```
 perplexity : calculating perplexity over 655 chunks
@@ -809,13 +828,13 @@ docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-
 On completion, you are ready to play!

 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```

 or with a light image:

 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```

 ### Docker With CUDA
@@ -846,8 +865,8 @@ The resulting images, are essentially the same as the non-CUDA images:
 After building locally, Usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.

 ```bash
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
+docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
+docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
 ```

 ### Contributing
@@ -877,3 +896,4 @@ docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /
 - [BLIS](./docs/BLIS.md)
 - [Performance troubleshooting](./docs/token_generation_performance_tips.md)
 - [GGML tips & tricks](https://github.com/ggerganov/llama.cpp/wiki/GGML-Tips-&-Tricks)
+- [GBNF grammars](./grammars/README.md)
ci/run.sh (44 lines changed, mode changed: normal file → executable file)
@@ -159,17 +159,17 @@ function gg_run_open_llama_3b_v2 {

     python3 ../convert.py ${path_models}

-    model_f16="${path_models}/ggml-model-f16.bin"
+    model_f16="${path_models}/ggml-model-f16.gguf"
-    model_q8_0="${path_models}/ggml-model-q8_0.bin"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
-    model_q4_0="${path_models}/ggml-model-q4_0.bin"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
-    model_q4_1="${path_models}/ggml-model-q4_1.bin"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
-    model_q5_0="${path_models}/ggml-model-q5_0.bin"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
-    model_q5_1="${path_models}/ggml-model-q5_1.bin"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
-    model_q2_k="${path_models}/ggml-model-q2_k.bin"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
-    model_q3_k="${path_models}/ggml-model-q3_k.bin"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
-    model_q4_k="${path_models}/ggml-model-q4_k.bin"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
-    model_q5_k="${path_models}/ggml-model-q5_k.bin"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
-    model_q6_k="${path_models}/ggml-model-q6_k.bin"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"

     wiki_test_60="${path_wiki}/wiki.test-60.raw"

@@ -285,17 +285,17 @@ function gg_run_open_llama_7b_v2 {

     python3 ../convert.py ${path_models}

-    model_f16="${path_models}/ggml-model-f16.bin"
+    model_f16="${path_models}/ggml-model-f16.gguf"
-    model_q8_0="${path_models}/ggml-model-q8_0.bin"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
-    model_q4_0="${path_models}/ggml-model-q4_0.bin"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
-    model_q4_1="${path_models}/ggml-model-q4_1.bin"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
-    model_q5_0="${path_models}/ggml-model-q5_0.bin"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
-    model_q5_1="${path_models}/ggml-model-q5_1.bin"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
-    model_q2_k="${path_models}/ggml-model-q2_k.bin"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
-    model_q3_k="${path_models}/ggml-model-q3_k.bin"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
-    model_q4_k="${path_models}/ggml-model-q4_k.bin"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
-    model_q5_k="${path_models}/ggml-model-q5_k.bin"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
-    model_q6_k="${path_models}/ggml-model-q6_k.bin"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"

     wiki_test="${path_wiki}/wiki.test.raw"

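To exercise these CI checks locally, the script takes a results directory and a mount/cache directory; an illustrative invocation (directory names are placeholders, and the `GG_BUILD_CUDA=1` toggle is only relevant when the CUDA toolkit is available):

```bash
# Illustrative local CI run; both directories are placeholders.
mkdir -p tmp/results tmp/mnt
bash ./ci/run.sh ./tmp/results ./tmp/mnt
# GG_BUILD_CUDA=1 bash ./ci/run.sh ./tmp/results ./tmp/mnt   # with the CUDA jobs
```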
common/CMakeLists.txt (new file, 20 lines)
@@ -0,0 +1,20 @@
# common

set(TARGET common)

add_library(${TARGET} OBJECT
    common.h
    common.cpp
    console.h
    console.cpp
    grammar-parser.h
    grammar-parser.cpp
    )

if (BUILD_SHARED_LIBS)
    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()

target_include_directories(${TARGET} PUBLIC .)
target_compile_features(${TARGET} PUBLIC cxx_std_11)
target_link_libraries(${TARGET} PRIVATE llama)
common/common.cpp

@@ -170,18 +170,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.n_ctx = std::stoi(argv[i]);
-        } else if (arg == "-gqa" || arg == "--gqa") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.n_gqa = std::stoi(argv[i]);
-        } else if (arg == "-eps" || arg == "--rms-norm-eps") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.rms_norm_eps = std::stof(argv[i]);
         } else if (arg == "--rope-freq-base") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -301,7 +289,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.n_batch = std::stoi(argv[i]);
-            params.n_batch = std::min(512, params.n_batch);
         } else if (arg == "--keep") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -400,11 +387,11 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
 #else
             fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
 #endif // GGML_USE_CUBLAS
-        } else if (arg == "--mul-mat-q" || arg == "-mmq") {
+        } else if (arg == "--no-mul-mat-q" || arg == "-nommq") {
 #ifdef GGML_USE_CUBLAS
-            params.mul_mat_q = true;
+            params.mul_mat_q = false;
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n");
 #endif // GGML_USE_CUBLAS
         } else if (arg == "--low-vram" || arg == "-lv") {
 #ifdef GGML_USE_CUBLAS
@@ -430,6 +417,18 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             params.antiprompt.push_back(argv[i]);
         } else if (arg == "--perplexity") {
             params.perplexity = true;
+        } else if (arg == "--ppl-stride") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.ppl_stride = std::stoi(argv[i]);
+        } else if (arg == "--ppl-output-type") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.ppl_output_type = std::stoi(argv[i]);
         } else if (arg == "--hellaswag") {
             params.hellaswag = true;
         } else if (arg == "--hellaswag-tasks") {
@@ -439,7 +438,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             }
             params.hellaswag_tasks = std::stoi(argv[i]);
         } else if (arg == "--ignore-eos") {
-            params.logit_bias[llama_token_eos()] = -INFINITY;
+            params.ignore_eos = true;
         } else if (arg == "--no-penalize-nl") {
             params.penalize_nl = false;
         } else if (arg == "-l" || arg == "--logit-bias") {
@@ -561,8 +560,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, " -n N, --n-predict N number of tokens to predict (default: %d, -1 = infinity, -2 = until context filled)\n", params.n_predict);
     fprintf(stdout, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
     fprintf(stdout, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
-    fprintf(stdout, " -gqa N, --gqa N grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa);
-    fprintf(stdout, " -eps N, --rms-norm-eps N rms norm eps (TEMP!!! use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps);
     fprintf(stdout, " --top-k N top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
     fprintf(stdout, " --top-p N top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
     fprintf(stdout, " --tfs N tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
@@ -614,11 +611,11 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, " number of layers to store in VRAM\n");
     fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n");
     fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-    fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n" );
+    fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n");
-    fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n" );
+    fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n");
-    fprintf(stdout, " -mmq, --mul-mat-q use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
+    fprintf(stdout, " -nommq, --no-mul-mat-q\n");
-    fprintf(stdout, " Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
+    fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
-    fprintf(stdout, " is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
+    fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n");
 #endif
     fprintf(stdout, " --mtest compute maximum memory usage\n");
     fprintf(stdout, " --export export the computation graph to 'llama.ggml'\n");
@@ -650,24 +647,15 @@ std::string gpt_random_prompt(std::mt19937 & rng) {
     return "The";
 }

-// TODO: not great allocating this every time
-std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
-    // initialize to prompt numer of chars, since n_tokens <= n_prompt_chars
-    std::vector<llama_token> res(text.size() + (int) add_bos);
-    const int n = llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
-    assert(n >= 0);
-    res.resize(n);
-
-    return res;
-}
+//
+// Model utils
+//

 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();

     lparams.n_ctx = params.n_ctx;
     lparams.n_batch = params.n_batch;
-    lparams.n_gqa = params.n_gqa;
-    lparams.rms_norm_eps = params.rms_norm_eps;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.main_gpu = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
@@ -685,7 +673,7 @@ struct llama_context_params llama_context_params_from_gpt_param
     return lparams;
 }

-std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params) {
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params) {
     auto lparams = llama_context_params_from_gpt_params(params);

     llama_model * model = llama_load_model_from_file(params.model.c_str(), lparams);
@@ -714,5 +702,77 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
         }
     }

+    if (params.ignore_eos) {
+        params.logit_bias[llama_token_eos(lctx)] = -INFINITY;
+    }
+
     return std::make_tuple(model, lctx);
 }
+
+//
+// Vocab utils
+//
+
+std::vector<llama_token> llama_tokenize(
+        struct llama_context * ctx,
+        const std::string & text,
+        bool add_bos) {
+    // upper limit for the number of tokens
+    int n_tokens = text.length() + add_bos;
+    std::vector<llama_token> result(n_tokens);
+    n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+    if (n_tokens < 0) {
+        result.resize(-n_tokens);
+        int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
+        GGML_ASSERT(check == -n_tokens);
+    } else {
+        result.resize(n_tokens);
+    }
+    return result;
+}
+
+std::string llama_token_to_str(const struct llama_context * ctx, llama_token token) {
+    std::vector<char> result(8, 0);
+    const int n_tokens = llama_token_to_str(ctx, token, result.data(), result.size());
+    if (n_tokens < 0) {
+        result.resize(-n_tokens);
+        int check = llama_token_to_str(ctx, token, result.data(), result.size());
+        GGML_ASSERT(check == -n_tokens);
+    } else {
+        result.resize(n_tokens);
+    }
+
+    return std::string(result.data(), result.size());
+}
+
+std::vector<llama_token> llama_tokenize_bpe(
+        struct llama_context * ctx,
+        const std::string & text,
+        bool add_bos) {
+    int n_tokens = text.length() + add_bos;
+    std::vector<llama_token> result(n_tokens);
+    n_tokens = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
+    if (n_tokens < 0) {
+        result.resize(-n_tokens);
+        int check = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
+        GGML_ASSERT(check == -n_tokens);
+    } else {
+        result.resize(n_tokens);
+    }
+    return result;
+}
+
+std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token) {
+    std::vector<char> result(8, 0);
+    const int n_tokens = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
+    if (n_tokens < 0) {
+        result.resize(-n_tokens);
+        const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
+        GGML_ASSERT(check == -n_tokens);
+    } else {
+        result.resize(n_tokens);
+    }
+
+    return std::string(result.data(), result.size());
+}
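The new `--ppl-stride` and `--ppl-output-type` options parsed here are consumed by the `perplexity` tool; an illustrative run over the WikiText-2 data referenced in the README (the stride value and file paths are assumptions):

```bash
# Illustrative: stride-based perplexity, emitting "num_tokens, ppl" pairs
# (output type 1) that are convenient for plotting.
./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw \
    --ppl-stride 32 --ppl-output-type 1
```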
common/common.h

@@ -22,19 +22,16 @@ struct gpt_params {
     int32_t n_predict = -1; // new tokens to predict
     int32_t n_ctx = 512; // context size
     int32_t n_batch = 512; // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_gqa = 1; // grouped-query attention factor (TODO: move to hparams)
     int32_t n_keep = 0; // number of tokens to keep from initial prompt
     int32_t n_chunks = -1; // max number of chunks to process (-1 = unlimited)
     int32_t n_gpu_layers = 0; // number of layers to store in VRAM
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
     int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
-    float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; // rms norm epsilon
     float rope_freq_base = 10000.0f; // RoPE base frequency
     float rope_freq_scale = 1.0f; // RoPE frequency scaling factor

     // sampling parameters
-    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
     int32_t top_k = 40; // <= 0 to use vocab size
     float top_p = 0.95f; // 1.0 = disabled
     float tfs_z = 1.00f; // 1.0 = disabled
@@ -48,12 +45,14 @@ struct gpt_params {
     float mirostat_tau = 5.00f; // target entropy
     float mirostat_eta = 0.10f; // learning rate

+    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
+
     // Classifier-Free Guidance
     // https://arxiv.org/abs/2306.17806
     std::string cfg_negative_prompt; // string to help guidance
     float cfg_scale = 1.f; // How strong is guidance

-    std::string model = "models/7B/ggml-model.bin"; // model path
+    std::string model = "models/7B/ggml-model-f16.gguf"; // model path
     std::string model_alias = "unknown"; // model alias
     std::string prompt = "";
     std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
@@ -65,11 +64,15 @@ struct gpt_params {
     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base = ""; // base model path for the lora adapter

+    int ppl_stride = 0; // stride for perplexity calculations. If left at 0, the pre-existing approach will be used.
+    int ppl_output_type = 0; // = 0 -> ppl output is as usual, = 1 -> ppl output is num_tokens, ppl, one per line
+                             // (which is more convenient to use for plotting)
+                             //
     bool hellaswag = false; // compute HellaSwag score over random tasks from datafile supplied in prompt
     size_t hellaswag_tasks = 400; // number of tasks to use when computing the HellaSwag score

     bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
-    bool mul_mat_q = false; // if true, use experimental mul_mat_q kernels
+    bool mul_mat_q = true; // if true, use mul_mat_q kernels instead of cuBLAS
     bool memory_f16 = true; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
@@ -83,6 +86,7 @@ struct gpt_params {
     bool simple_io = false; // improves compatibility with subprocesses and limited consoles

     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
+    bool ignore_eos = false; // ignore generated EOS tokens
     bool instruct = false; // instruction mode (used for Alpaca models)
     bool penalize_nl = true; // consider newlines as a repeatable token
     bool perplexity = false; // compute perplexity over the prompt
@@ -100,15 +104,31 @@ void gpt_print_usage(int argc, char ** argv, const gpt_params & params);

 std::string gpt_random_prompt(std::mt19937 & rng);

-//
-// Vocab utils
-//
-
-std::vector<llama_token> llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
-
 //
 // Model utils
 //

-std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(const gpt_params & params);
+std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_params(gpt_params & params);
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
+
+//
+// Vocab utils
+//
+
+std::vector<llama_token> llama_tokenize(
+        struct llama_context * ctx,
+        const std::string & text,
+        bool add_bos);
+
+std::vector<llama_token> llama_tokenize_bpe(
+        struct llama_context * ctx,
+        const std::string & text,
+        bool add_bos);
+
+std::string llama_token_to_str(
+        const struct llama_context * ctx,
+        llama_token token);
+
+std::string llama_token_to_str_bpe(
+        const struct llama_context * ctx,
+        llama_token token);
convert-falcon-hf-to-gguf.py (new executable file, 283 lines)
@@ -0,0 +1,283 @@
#!/usr/bin/env python3
# HF falcon--> gguf conversion

import gguf
import os
import sys
import struct
import json
import numpy as np
import torch

from typing import Any, List
from pathlib import Path
from transformers import AutoTokenizer

def bytes_to_unicode():
    # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def count_model_parts(dir_model: str) -> int:
    num_parts = 0
    for filename in os.listdir(dir_model):
        if filename.startswith("pytorch_model-"):
            num_parts += 1

    if num_parts > 0:
        print("gguf: found " + str(num_parts) + " model parts")
    return num_parts


if len(sys.argv) < 3:
    print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")
    sys.exit(1)


# output in the same directory as the model
dir_model = sys.argv[1]
last_dir = os.path.basename(os.path.normpath(dir_model))

# possible tensor data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16

# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))

        sys.exit(1)

fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"

print("gguf: loading model "+last_dir)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)

if hparams["architectures"][0] != "RWForCausalLM":
    print("Model architecture not supported: " + hparams["architectures"][0])

    sys.exit()

# get number of model parts
num_parts = count_model_parts(dir_model)

ARCH=gguf.MODEL_ARCH.FALCON
gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

print("gguf: get model metadata")

block_count = hparams["n_layer"]

gguf_writer.add_name(last_dir)
gguf_writer.add_context_length(2048)  # not in config.json
gguf_writer.add_tensor_data_layout("jploski")  # qkv tensor transform
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_feed_forward_length(4 * hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_head_count(hparams["n_head"])
if "n_head_kv" in hparams: gguf_writer.add_head_count_kv(hparams["n_head_kv"])
gguf_writer.add_layer_norm_eps(hparams["layer_norm_epsilon"])

# TOKENIZATION

print("gguf: get tokenizer metadata")

tokens: List[str] = []
merges: List[str] = []


if Path(dir_model + "/tokenizer.json").is_file():
    # gpt2 tokenizer
    gguf_writer.add_tokenizer_model("gpt2")

    print("gguf: get gpt2 tokenizer merges")

    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
        tokenizer_json = json.load(f)
    merges = tokenizer_json["model"]["merges"]

    gguf_writer.add_token_merges(merges)

    print("gguf: get gpt2 tokenizer vocab")

    vocab_size = len(tokenizer_json["model"]["vocab"])

    # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
    tokenizer = AutoTokenizer.from_pretrained(dir_model)

    reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}

    for i in range(vocab_size):
        if i in reverse_vocab:
            try:
                text = bytearray([byte_decoder[c] for c in reverse_vocab[i]])
            except KeyError:
                text = bytearray()
                for c in reverse_vocab[i]:
                    if ord(c) < 256:  # single byte character
                        text.append(byte_decoder[ord(c)])
                    else:  # multibyte special token character
                        text.extend(c.encode('utf-8'))
        else:
            print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
            pad_token = f"[PAD{i}]".encode("utf8")
            text = bytearray(pad_token)

        tokens.append(text)

    gguf_writer.add_token_list(tokens)

    if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file():
        print("gguf: get special token ids")

        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_config = json.load(f)

        # find special token ids

        if "bos_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["bos_token"]:
                    gguf_writer.add_bos_token_id(key["id"])

        if "eos_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["eos_token"]:
                    gguf_writer.add_eos_token_id(key["id"])

        if "unk_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["unk_token"]:
                    gguf_writer.add_unk_token_id(key["id"])

        if "sep_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["sep_token"]:
                    gguf_writer.add_sep_token_id(key["id"])

        if "pad_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["pad_token"]:
                    gguf_writer.add_pad_token_id(key["id"])


# TENSORS

tensor_map = gguf.get_tensor_name_map(ARCH,block_count)

# params for qkv transform
n_head = hparams["n_head"]
n_head_kv = hparams["n_head_kv"] if "n_head_kv" in hparams else 1
head_dim = hparams["hidden_size"] // n_head

# tensor info
print("gguf: get tensor metadata")

if num_parts == 0:
    part_names = ("pytorch_model.bin",)
else:
    part_names = (
        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
    )

for part_name in part_names:
    print("gguf: loading model part '" + part_name + "'")
    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")

    for name in model_part.keys():
        data = model_part[name]

        old_dtype = data.dtype

        # convert any unsupported data types to float32
        if data.dtype != torch.float16 and data.dtype != torch.float32:
            data = data.to(torch.float32)

        # QKV tensor transform
        # The original query_key_value tensor contains n_head_kv "kv groups",
        # each consisting of n_head/n_head_kv query weights followed by one key
|
||||||
|
# and one value weight (shared by all query heads in the kv group).
|
||||||
|
# This layout makes it a big pain to work with in GGML.
|
||||||
|
# So we rearrange them here,, so that we have n_head query weights
|
||||||
|
# followed by n_head_kv key weights followed by n_head_kv value weights,
|
||||||
|
# in contiguous fashion.
|
||||||
|
# ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py
|
||||||
|
|
||||||
|
if "query_key_value" in name:
|
||||||
|
qkv = data.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head)
|
||||||
|
q = qkv[:, :-2 ].reshape(n_head * head_dim, head_dim * n_head)
|
||||||
|
k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head)
|
||||||
|
v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head)
|
||||||
|
data = torch.cat((q,k,v)).reshape_as(data)
|
||||||
|
|
||||||
|
data = data.squeeze().numpy()
|
||||||
|
|
||||||
|
# map tensor names
|
||||||
|
if name.endswith(".weight") and name[:-7] in tensor_map:
|
||||||
|
name = tensor_map[name[:-7]] + ".weight"
|
||||||
|
elif name.endswith(".bias") and name[:-5] in tensor_map:
|
||||||
|
name = tensor_map[name[:-5]] + ".bias"
|
||||||
|
else:
|
||||||
|
print("Can not map tensor '" + name + "'")
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
n_dims = len(data.shape)
|
||||||
|
data_dtype = data.dtype
|
||||||
|
|
||||||
|
# if f32 desired, convert any float16 to float32
|
||||||
|
if ftype == 0 and data_dtype == np.float16:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
|
||||||
|
if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
|
||||||
|
data = data.astype(np.float32)
|
||||||
|
|
||||||
|
# if f16 desired, convert any float32 2-dim weight tensors to float16
|
||||||
|
if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
|
||||||
|
data = data.astype(np.float16)
|
||||||
|
|
||||||
|
print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
|
||||||
|
|
||||||
|
gguf_writer.add_tensor(name, data)
|
||||||
|
|
||||||
|
|
||||||
|
print("gguf: write header")
|
||||||
|
gguf_writer.write_header_to_file()
|
||||||
|
print("gguf: write metadata")
|
||||||
|
gguf_writer.write_kv_data_to_file()
|
||||||
|
print("gguf: write tensors")
|
||||||
|
gguf_writer.write_tensors_to_file()
|
||||||
|
|
||||||
|
gguf_writer.close()
|
||||||
|
|
||||||
|
print("gguf: model successfully exported to '" + fname_out + "'")
|
||||||
|
print("")
|
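A purely illustrative note on the QKV rearrangement in the Falcon converter above, with hypothetical head counts not taken from any real config: for n_head = 8, n_head_kv = 2 and head_dim = 64, the fused query_key_value weight is viewed as 2 kv groups of (4 query rows + 1 key row + 1 value row) of width head_dim, and is rewritten as 8 query blocks, then 2 key blocks, then 2 value blocks, so that Q, K and V can be sliced out contiguously on the GGML side.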
267
convert-gptneox-hf-to-gguf.py
Executable file
@ -0,0 +1,267 @@
#!/usr/bin/env python3
# HF gptneox --> gguf conversion

import gguf
import os
import sys
import struct
import json
import numpy as np
import torch

from typing import Any, List
from pathlib import Path
from transformers import AutoTokenizer

# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py


def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def count_model_parts(dir_model: str) -> int:
    num_parts = 0
    for filename in os.listdir(dir_model):
        if filename.startswith("pytorch_model-"):
            num_parts += 1

    if num_parts > 0:
        print("gguf: found " + str(num_parts) + " model parts")
    return num_parts


if len(sys.argv) < 3:
    print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")
    sys.exit(1)


# output in the same directory as the model
dir_model = sys.argv[1]
last_dir = os.path.basename(os.path.normpath(dir_model))

# possible tensor data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16

# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)

fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"

print("gguf: loading model " + last_dir)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)

if hparams["architectures"][0] != "GPTNeoXForCausalLM":
    print("Model architecture not supported: " + hparams["architectures"][0])
    sys.exit()

# get number of model parts
num_parts = count_model_parts(dir_model)

ARCH = gguf.MODEL_ARCH.GPTNEOX
gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

print("gguf: get model metadata")

block_count = hparams["num_hidden_layers"]

gguf_writer.add_name(last_dir)
gguf_writer.add_context_length(hparams["max_position_embeddings"])
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
gguf_writer.add_rope_dimension_count(int(hparams["rotary_pct"]*(hparams["hidden_size"]//hparams["num_attention_heads"])))
gguf_writer.add_head_count(hparams["num_attention_heads"])
gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"])


# TOKENIZATION

print("gguf: get tokenizer metadata")

tokens: List[str] = []
merges: List[str] = []


if Path(dir_model + "/tokenizer.json").is_file():
    # gpt2 tokenizer
    gguf_writer.add_tokenizer_model("gpt2")

    print("gguf: get gpt2 tokenizer merges")

    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
        tokenizer_json = json.load(f)
    merges = tokenizer_json["model"]["merges"]

    gguf_writer.add_token_merges(merges)

    print("gguf: get gpt2 tokenizer vocab")

    vocab_size = len(tokenizer_json["model"]["vocab"])

    # ref: https://github.com/cmp-nct/ggllm.cpp/blob/master/falcon_convert.py
    tokenizer = AutoTokenizer.from_pretrained(dir_model)

    reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
    byte_encoder = bytes_to_unicode()
    byte_decoder = {v: k for k, v in byte_encoder.items()}

    for i in range(vocab_size):
        if i in reverse_vocab:
            try:
                text = bytearray([byte_decoder[c] for c in reverse_vocab[i]])
            except KeyError:
                text = bytearray()
                for c in reverse_vocab[i]:
                    if ord(c) < 256:  # single byte character
                        text.append(byte_decoder[ord(c)])
                    else:  # multibyte special token character
                        text.extend(c.encode('utf-8'))
        else:
            print(f"Key {i} not in tokenizer vocabulary. Padding with an arbitrary token.")
            pad_token = f"[PAD{i}]".encode("utf8")
            text = bytearray(pad_token)

        tokens.append(text)

    gguf_writer.add_token_list(tokens)

    if "added_tokens" in tokenizer_json and Path(dir_model + "/tokenizer_config.json").is_file():
        print("gguf: get special token ids")

        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_config = json.load(f)

        # find special token ids

        if "bos_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["bos_token"]:
                    gguf_writer.add_bos_token_id(key["id"])

        if "eos_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["eos_token"]:
                    gguf_writer.add_eos_token_id(key["id"])

        if "unk_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["unk_token"]:
                    gguf_writer.add_unk_token_id(key["id"])

        if "sep_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["sep_token"]:
                    gguf_writer.add_sep_token_id(key["id"])

        if "pad_token" in tokenizer_config:
            for key in tokenizer_json["added_tokens"]:
                if key["content"] == tokenizer_config["pad_token"]:
                    gguf_writer.add_pad_token_id(key["id"])


# TENSORS

tensor_map = gguf.get_tensor_name_map(ARCH, block_count)

# tensor info
print("gguf: get tensor metadata")

if num_parts == 0:
    part_names = ("pytorch_model.bin",)
else:
    part_names = (
        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
    )

for part_name in part_names:
    print("gguf: loading model part '" + part_name + "'")
    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")

    for name in model_part.keys():
        data = model_part[name]

        # we don't need these
        if name.endswith(".attention.masked_bias") or name.endswith(".attention.bias") or name.endswith(".attention.rotary_emb.inv_freq"):
            continue

        old_dtype = data.dtype

        # convert any unsupported data types to float32
        if data.dtype != torch.float16 and data.dtype != torch.float32:
            data = data.to(torch.float32)

        data = data.squeeze().numpy()

        # map tensor names
        if name.endswith(".weight") and name[:-7] in tensor_map:
            name = tensor_map[name[:-7]] + ".weight"
        elif name.endswith(".bias") and name[:-5] in tensor_map:
            name = tensor_map[name[:-5]] + ".bias"
        else:
            print("Cannot map tensor '" + name + "'")
            sys.exit()

        n_dims = len(data.shape)
        data_dtype = data.dtype

        # if f32 desired, convert any float16 to float32
        if ftype == 0 and data_dtype == np.float16:
            data = data.astype(np.float32)

        # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
            data = data.astype(np.float32)

        # if f16 desired, convert any float32 2-dim weight tensors to float16
        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
            data = data.astype(np.float16)

        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))

        gguf_writer.add_tensor(name, data)


print("gguf: write header")
gguf_writer.write_header_to_file()
print("gguf: write metadata")
gguf_writer.write_kv_data_to_file()
print("gguf: write tensors")
gguf_writer.write_tensors_to_file()

gguf_writer.close()

print("gguf: model successfully exported to '" + fname_out + "'")
print("")
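A minimal sketch of how the converter above might be invoked, assuming a locally downloaded GPT-NeoX style checkpoint (the directory name is a placeholder): python3 convert-gptneox-hf-to-gguf.py ./pythia-1.4b 1. With ftype 1 the output is written next to the model as ./pythia-1.4b/ggml-model-f16.gguf.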
308
convert-llama-7b-pth-to-gguf.py
Executable file
@ -0,0 +1,308 @@
#!/usr/bin/env python3
# 7b pth llama --> gguf conversion
# Only models with a single datafile are supported, like 7B
# HF files required in the model dir: config.json tokenizer_config.json tokenizer.json tokenizer.model

import gguf
import os
import sys
import struct
import json
import numpy as np
import torch

from typing import Any, List
from pathlib import Path
from sentencepiece import SentencePieceProcessor

#NDArray = np.ndarray[Any, Any]
# compatible with python < 3.9
NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'


def count_model_parts(dir_model: str) -> int:
    num_parts = 0
    for filename in os.listdir(dir_model):
        if filename.startswith("consolidated."):
            num_parts += 1

    if num_parts > 0:
        print("gguf: found " + str(num_parts) + " model parts")
    return num_parts


if len(sys.argv) < 3:
    print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")

    sys.exit(1)


# output in the same directory as the model
dir_model = sys.argv[1]
last_dir = os.path.basename(os.path.normpath(dir_model))


# possible tensor data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16

# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)

fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"

print("gguf: loading model " + last_dir)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)

if hparams["architectures"][0] != "LlamaForCausalLM":
    print("Model architecture not supported: " + hparams["architectures"][0])
    sys.exit()

# get number of model parts
num_parts = count_model_parts(dir_model)

if num_parts > 1:
    print("gguf: Only models with a single datafile are supported.")
    sys.exit()

ARCH = gguf.MODEL_ARCH.LLAMA
gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])


print("gguf: get model metadata")

block_count = hparams["num_hidden_layers"]
head_count = hparams["num_attention_heads"]

if "num_key_value_heads" in hparams:
    head_count_kv = hparams["num_key_value_heads"]
else:
    head_count_kv = head_count

if "_name_or_path" in hparams:
    hf_repo = hparams["_name_or_path"]
else:
    hf_repo = ""

if "max_sequence_length" in hparams:
    ctx_length = hparams["max_sequence_length"]
elif "max_position_embeddings" in hparams:
    ctx_length = hparams["max_position_embeddings"]
else:
    print("gguf: can not find ctx length parameter.")
    sys.exit()


gguf_writer.add_name(last_dir)
gguf_writer.add_source_hf_repo(hf_repo)
gguf_writer.add_tensor_data_layout("Meta AI original pth")
gguf_writer.add_context_length(ctx_length)
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
gguf_writer.add_head_count(head_count)
gguf_writer.add_head_count_kv(head_count_kv)
gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])

if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    if "type" in hparams["rope_scaling"]:
        if hparams["rope_scaling"]["type"] == "linear":
            gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])


# TOKENIZATION

print("gguf: get tokenizer metadata")

tokens: List[bytes] = []
scores: List[float] = []
toktypes: List[int] = []

if Path(dir_model + "/tokenizer.model").is_file():
    # vocab type sentencepiece
    print("gguf: get sentencepiece tokenizer vocab and scores")

    tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model")

    for i in range(tokenizer.vocab_size()):
        text: bytes
        score: float

        piece = tokenizer.id_to_piece(i)
        text = piece.encode("utf-8")
        score = tokenizer.get_score(i)

        toktype = 1  # default to normal token type
        if tokenizer.is_unknown(i):
            toktype = 2
        if tokenizer.is_control(i):
            toktype = 3

        # toktype = 4 is user-defined = tokens from added_tokens.json

        if tokenizer.is_unused(i):
            toktype = 5
        if tokenizer.is_byte(i):
            toktype = 6

        tokens.append(text)
        scores.append(score)
        toktypes.append(toktype)

    if Path(dir_model + "/added_tokens.json").is_file():
        with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f:
            addtokens_json = json.load(f)

            print("gguf: get added tokens")

            for key in addtokens_json:
                tokens.append( key.encode("utf-8") )
                scores.append(-1000.0)
                toktypes.append(4)  # user-defined token type

    gguf_writer.add_tokenizer_model("llama")
    gguf_writer.add_token_list(tokens)
    gguf_writer.add_token_scores(scores)
    gguf_writer.add_token_types(toktypes)


print("gguf: get special token ids")

if Path(dir_model + "/tokenizer.json").is_file():
    # Look for special tokens in tokenizer.json if it exists

    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
        tokenizer = json.load(f)

    if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file():

        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_config = json.load(f)

        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["bos_token"]["content"]:
                    gguf_writer.add_bos_token_id(key["id"])

        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["eos_token"]["content"]:
                    gguf_writer.add_eos_token_id(key["id"])

        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["unk_token"]["content"]:
                    gguf_writer.add_unk_token_id(key["id"])

        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["sep_token"]["content"]:
                    gguf_writer.add_sep_token_id(key["id"])

        if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["pad_token"]["content"]:
                    gguf_writer.add_pad_token_id(key["id"])
else:
    # If no tokenizer.json: Look for special tokens in config.json

    if "bos_token_id" in hparams and hparams["bos_token_id"] != None:
        gguf_writer.add_bos_token_id(hparams["bos_token_id"])

    if "eos_token_id" in hparams and hparams["eos_token_id"] != None:
        gguf_writer.add_eos_token_id(hparams["eos_token_id"])

    if "unk_token_id" in hparams and hparams["unk_token_id"] != None:
        gguf_writer.add_unk_token_id(hparams["unk_token_id"])

    if "sep_token_id" in hparams and hparams["sep_token_id"] != None:
        gguf_writer.add_sep_token_id(hparams["sep_token_id"])

    if "pad_token_id" in hparams and hparams["pad_token_id"] != None:
        gguf_writer.add_pad_token_id(hparams["pad_token_id"])


# TENSORS

tensor_map = gguf.get_tensor_name_map(ARCH, block_count)

# tensor info
print("gguf: get tensor metadata")

part_names = (f"consolidated.{n:02}.pth" for n in range(0, num_parts))

for part_name in part_names:
    print("gguf: loading model part '" + part_name + "'")
    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")

    for name in model_part.keys():
        data = model_part[name]

        # we don't need these
        if name == "rope.freqs":
            continue

        old_dtype = data.dtype

        # convert any unsupported data types to float32
        if data.dtype != torch.float16 and data.dtype != torch.float32:
            data = data.to(torch.float32)

        data = data.squeeze().numpy()

        # map tensor names
        if name.endswith(".weight") and name[:-7] in tensor_map:
            name = tensor_map[name[:-7]] + ".weight"
        elif name.endswith(".bias") and name[:-5] in tensor_map:
            name = tensor_map[name[:-5]] + ".bias"
        else:
            print("Cannot map tensor '" + name + "'")
            sys.exit()

        n_dims = len(data.shape)
        data_dtype = data.dtype

        # if f32 desired, convert any float16 to float32
        if ftype == 0 and data_dtype == np.float16:
            data = data.astype(np.float32)

        # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
            data = data.astype(np.float32)

        # if f16 desired, convert any float32 2-dim weight tensors to float16
        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
            data = data.astype(np.float16)

        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))

        gguf_writer.add_tensor(name, data)


print("gguf: write header")
gguf_writer.write_header_to_file()
print("gguf: write metadata")
gguf_writer.write_kv_data_to_file()
print("gguf: write tensors")
gguf_writer.write_tensors_to_file()

gguf_writer.close()


print("gguf: model successfully exported to '" + fname_out + "'")
print("")
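Per the header comments of the script above, the model directory is expected to contain a single consolidated.00.pth datafile together with config.json, tokenizer_config.json, tokenizer.json and tokenizer.model; a hypothetical invocation would be python3 convert-llama-7b-pth-to-gguf.py ./llama-7b 1.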
345
convert-llama-ggmlv3-to-gguf.py
Executable file
@ -0,0 +1,345 @@
#!/usr/bin/env python3
import sys, struct, math, argparse
from pathlib import Path

import numpy as np

import gguf

# Note: Does not support GGML_QKK_64
QK_K = 256
# Items here are (block size, type size)
GGML_QUANT_SIZES = {
    gguf.GGMLQuantizationType.F32  : (1, 4),
    gguf.GGMLQuantizationType.F16  : (1, 2),
    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
    gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8),
}

class Hyperparameters:
    def __init__(self):
        self.n_vocab = self.n_embd = self.n_mult = self.n_head = self.n_layer = self.n_rot = self.ftype = 0
        self.n_ff = 0

    def set_n_ff(self, model):
        ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight')
        assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor'
        ff_tensor = model.tensors[ff_tensor_idx]
        self.n_ff = ff_tensor.dims[1]

    def load(self, data, offset):
        (
            self.n_vocab,
            self.n_embd,
            self.n_mult,
            self.n_head,
            self.n_layer,
            self.n_rot,
            self.ftype,
        ) = struct.unpack('<7I', data[offset:offset + (4 * 7)])
        return 4 * 7

    def __str__(self):
        return f'<Hyperparameters: n_vocab={self.n_vocab}, n_embd={self.n_embd}, n_mult={self.n_mult}, n_head={self.n_head}, n_layer={self.n_layer}, n_rot={self.n_rot}, n_ff={self.n_ff}, ftype={self.ftype}>'

class Vocab:
    def __init__(self):
        self.items = []

    def load(self, data, offset, n_vocab):
        orig_offset = offset
        for _ in range(n_vocab):
            itemlen = struct.unpack('<I', data[offset:offset + 4])[0]
            assert itemlen < 4096, 'Absurd vocab item length'
            offset += 4
            vocab = bytes(data[offset:offset + itemlen])
            offset += itemlen
            score = struct.unpack('<f', data[offset:offset + 4])[0]
            offset += 4
            self.items.append((vocab, score))
        return offset - orig_offset

class Tensor:
    def __init__(self):
        self.name = None
        self.dims = ()
        self.dtype = None
        self.start_offset = 0
        self.len_bytes = 0

    def load(self, data, offset):
        orig_offset = offset
        (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
        assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
        assert name_len < 4096, 'Absurd tensor name length'
        quant = GGML_QUANT_SIZES.get(dtype)
        assert quant is not None, 'Unknown tensor type'
        (blksize, tysize) = quant
        offset += 12
        self.dtype = dtype
        self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)])
        offset += 4 * n_dims
        self.name = bytes(data[offset:offset + name_len])
        offset += name_len
        pad = ((offset + 31) & ~31) - offset
        offset += pad
        n_elems = np.prod(self.dims)
        n_bytes = np.int64(np.int64(n_elems) * np.int64(tysize)) // np.int64(blksize)
        self.start_offset = offset
        self.len_bytes = n_bytes
        offset += n_bytes
        # print(n_dims, name_len, dtype, self.dims, self.name, pad)
        return offset - orig_offset

class GGMLV3Model:
    def __init__(self):
        self.hyperparameters = None
        self.vocab = None
        self.tensor_map = {}
        self.tensors = []

    def validate_header(self, data, offset):
        if bytes(data[offset:offset + 4]) != b'tjgg' or struct.unpack('<I', data[offset + 4:offset + 8])[0] != 3:
            raise ValueError('Only GGJTv3 supported')
        return 8

    def load(self, data, offset):
        offset += self.validate_header(data, offset)
        hp = Hyperparameters()
        offset += hp.load(data, offset)
        vocab = Vocab()
        offset += vocab.load(data, offset, hp.n_vocab)
        tensors = []
        tensor_map = {}
        while offset < len(data):
            tensor = Tensor()
            offset += tensor.load(data, offset)
            tensor_map[tensor.name] = len(tensors)
            tensors.append(tensor)
        self.hyperparameters = hp
        self.vocab = vocab
        self.tensors = tensors
        self.tensor_map = tensor_map
        hp.set_n_ff(self)
        return offset

class GGMLToGGUF:
    def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None):
        hp = ggml_model.hyperparameters
        self.model = ggml_model
        self.data = data
        self.cfg = cfg
        self.params_override = params_override
        self.vocab_override = vocab_override
        if params_override is not None:
            n_kv_head = params_override.n_head_kv
        else:
            if cfg.gqa == 1:
                n_kv_head = hp.n_head
            else:
                gqa = float(cfg.gqa)
                n_kv_head = None
                for x in range(1, 256):
                    if float(hp.n_head) / float(x) == gqa:
                        n_kv_head = x
                assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param"
                print(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}')
        self.n_kv_head = n_kv_head
        self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer)

    def save(self):
        print('* Preparing to save GGUF file')
        gguf_writer = gguf.GGUFWriter(self.cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
        self.add_params(gguf_writer)
        self.add_vocab(gguf_writer)
        self.add_tensors(gguf_writer)
        print("    gguf: write header")
        gguf_writer.write_header_to_file()
        print("    gguf: write metadata")
        gguf_writer.write_kv_data_to_file()
        print("    gguf: write tensors")
        gguf_writer.write_tensors_to_file()
        gguf_writer.close()

    def add_params(self, gguf_writer):
        hp = self.model.hyperparameters
        cfg = self.cfg
        desc = cfg.desc if cfg.desc is not None else 'converted from legacy GGJTv3 format'
        try:
            # Filenames aren't necessarily valid UTF8.
            name = cfg.name if cfg.name is not None else cfg.input.name
        except UnicodeDecodeError:
            name = None
        print('* Adding model parameters and KV items')
        if name is not None:
            gguf_writer.add_name(name)
        gguf_writer.add_description(desc)
        if self.params_override is not None:
            po = self.params_override
            assert po.n_embd == hp.n_embd, 'Model hyperparams mismatch'
            assert po.n_layer == hp.n_layer, 'Model hyperparams mismatch'
            assert po.n_head == hp.n_head, 'Model hyperparams mismatch'
            gguf_writer.add_context_length(po.n_ctx)
            gguf_writer.add_embedding_length(po.n_embd)
            gguf_writer.add_block_count(po.n_layer)
            gguf_writer.add_feed_forward_length(po.n_ff)
            gguf_writer.add_rope_dimension_count(po.n_embd // po.n_head)
            gguf_writer.add_head_count(po.n_head)
            gguf_writer.add_head_count_kv(po.n_head_kv)
            gguf_writer.add_layer_norm_rms_eps(po.f_norm_eps)
            return
        gguf_writer.add_context_length(cfg.context_length)
        gguf_writer.add_embedding_length(hp.n_embd)
        gguf_writer.add_block_count(hp.n_layer)
        gguf_writer.add_feed_forward_length(hp.n_ff)
        gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head)
        gguf_writer.add_head_count(hp.n_head)
        gguf_writer.add_head_count_kv(self.n_kv_head)
        gguf_writer.add_layer_norm_rms_eps(float(cfg.eps))

    def add_vocab(self, gguf_writer):
        hp = self.model.hyperparameters
        gguf_writer.add_tokenizer_model('llama')
        tokens = []
        scores = []
        toktypes = []
        if self.vocab_override is not None:
            vo = self.vocab_override
            print('* Adding vocab item(s)')
            for (idx, (vbytes, score, ttype)) in enumerate(vo.all_tokens()):
                tokens.append(vbytes)
                scores.append(score)
                toktypes.append(ttype)
            assert len(tokens) == hp.n_vocab, f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}'
            gguf_writer.add_token_list(tokens)
            gguf_writer.add_token_scores(scores)
            if len(toktypes) > 0:
                gguf_writer.add_token_types(toktypes)
            return
        print(f'* Adding {hp.n_vocab} vocab item(s)')
        assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab'
        for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items):
            tt = 1 # Normal
            # Special handling for UNK, BOS, EOS tokens.
            if tokid <= 2:
                if tokid == 0:
                    vbytes = b'<unk>'
                    tt = 2
                elif tokid == 1:
                    vbytes = b'<s>'
                    tt = 3
                else:
                    vbytes = b'</s>'
                    tt = 3
            elif len(vbytes) == 0:
                tt = 3 # Control
            elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1:
                vbytes = bytes(f'<0x{vbytes[0]:02X}>', encoding = 'UTF-8')
                tt = 6 # Byte
            else:
                vbytes = vbytes.replace(b' ', b'\xe2\x96\x81')
            toktypes.append(tt)
            tokens.append(vbytes)
            scores.append(vscore)
        gguf_writer.add_token_list(tokens)
        gguf_writer.add_token_scores(scores)
        gguf_writer.add_token_types(toktypes)
        gguf_writer.add_unk_token_id(0)
        gguf_writer.add_bos_token_id(1)
        gguf_writer.add_eos_token_id(2)

    def add_tensors(self, gguf_writer):
        nm = self.name_map
        data = self.data
        print(f'* Adding {len(self.model.tensors)} tensor(s)')
        for tensor in self.model.tensors:
            name = str(tensor.name, 'UTF-8')
            if name.endswith('.weight'):
                name = name[:-7]
                suffix = '.weight'
            elif name.endswith('.bias'):
                name = name[:-5]
                suffix = '.bias'
            mapped_name = nm.get(name)
            assert mapped_name is not None, f'Bad name {name}'
            mapped_name += suffix
            tempdims = list(tensor.dims[:])
            if len(tempdims) > 1:
                temp = tempdims[1]
                tempdims[1] = tempdims[0]
                tempdims[0] = temp
            # print(f'+ {tensor.name} | {mapped_name} {tensor.dims} :: {tempdims}')
            gguf_writer.add_tensor(mapped_name, data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], raw_shape = tempdims, raw_dtype = tensor.dtype)

def handle_metadata(cfg, hp):
    import convert
    assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory'
    hf_config_path   = cfg.model_metadata_dir / "config.json"
    orig_config_path = cfg.model_metadata_dir / "params.json"
    # We pass a fake model here. "original" mode will check the shapes of some
    # tensors if information is missing in the .json file: other than that, the
    # model data isn't used so this should be safe (at least for now).
    fakemodel = {
        'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor),
        'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor),
    }
    fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab]
    fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff]
    if hf_config_path.exists():
        params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path)
    elif orig_config_path.exists():
        params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path)
    else:
        raise ValueError('Unable to load metadata')
    vocab = convert.load_vocab(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir, cfg.vocabtype)
    convert.check_vocab_size(params, vocab)
    return (params, vocab)

def handle_args():
    parser = argparse.ArgumentParser(description = 'Convert GGMLv3 models to GGUF')
    parser.add_argument('--input', '-i', type = Path, help = 'Input GGMLv3 filename')
    parser.add_argument('--output', '-o', type = Path, help = 'Output GGUF filename')
    parser.add_argument('--name', help = 'Set model name')
    parser.add_argument('--desc', help = 'Set model description')
    parser.add_argument('--gqa', type = int, default = 1, help = 'grouped-query attention factor (use 8 for LLaMA2 70B)')
    parser.add_argument('--eps', default = '5.0e-06', help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2')
    parser.add_argument('--context-length', '-c', type=int, default = 2048, help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096')
    parser.add_argument('--model-metadata-dir', '-m', type = Path, help = 'Load HuggingFace/.pth vocab and metadata from the specified directory')
    parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir")
    parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm)", default="spm")
    return parser.parse_args()

def main():
    cfg = handle_args()
    print(f'* Using config: {cfg}')
    print('\n=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===\n')
    data = np.memmap(cfg.input, mode = 'r')
    model = GGMLV3Model()
    print('* Scanning GGML input file')
    offset = model.load(data, 0)
    print(f'* GGML model hyperparameters: {model.hyperparameters}')
    vocab_override = None
    params_override = None
    if cfg.model_metadata_dir is not None:
        (params_override, vocab_override) = handle_metadata(cfg, model.hyperparameters)
        print('!! Note: When overriding params the --gqa, --eps and --context-length options are ignored.')
        print(f'* Overriding params: {params_override}')
        print(f'* Overriding vocab: {vocab_override}')
    else:
        print('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n')
    converter = GGMLToGGUF(model, data, cfg, params_override = params_override, vocab_override = vocab_override)
    converter.save()
    print(f'* Successful completion. Output saved to: {cfg.output}')

if __name__ == '__main__':
    main()
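A hypothetical invocation of the GGML-to-GGUF converter above, using only flags defined in its argument parser (file names are placeholders): python3 convert-llama-ggmlv3-to-gguf.py --input llama-2-70b.ggmlv3.q4_0.bin --output llama-2-70b.q4_0.gguf --gqa 8 --eps 1e-5 --context-length 4096. When --model-metadata-dir is supplied, the --gqa, --eps and --context-length values are ignored in favour of the loaded metadata.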
328
convert-llama-hf-to-gguf.py
Executable file
@ -0,0 +1,328 @@
#!/usr/bin/env python3
# HF llama --> gguf conversion

import gguf
import os
import sys
import struct
import json
import numpy as np
import torch

from typing import Any, List, Optional
from pathlib import Path
from sentencepiece import SentencePieceProcessor

#NDArray = np.ndarray[Any, Any]
# compatible with python < 3.9
NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'

# reverse HF permute back to original pth layout
# https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py


def reverse_hf_permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
    if n_kv_head is not None and n_head != n_kv_head:
        n_head //= n_kv_head

    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
            .swapaxes(1, 2)
            .reshape(weights.shape))


def count_model_parts(dir_model: str) -> int:
    num_parts = 0

    for filename in os.listdir(dir_model):
        if filename.startswith("pytorch_model-"):
            num_parts += 1

    if num_parts > 0:
        print("gguf: found " + str(num_parts) + " model parts")

    return num_parts


if len(sys.argv) < 3:
    print("Usage: convert-h5-to-ggml.py dir-model ftype\n")
    print("  ftype == 0 -> float32")
    print("  ftype == 1 -> float16")

    sys.exit(1)


# output in the same directory as the model
dir_model = sys.argv[1]
last_dir = os.path.basename(os.path.normpath(dir_model))


# possible tensor data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16

# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if len(sys.argv) > 2:
    ftype = int(sys.argv[2])
    if ftype < 0 or ftype > 1:
        print("Invalid ftype: " + str(ftype))
        sys.exit(1)

fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".gguf"

print("gguf: loading model " + last_dir)

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    hparams = json.load(f)

if hparams["architectures"][0] != "LlamaForCausalLM":
    print("Model architecture not supported: " + hparams["architectures"][0])
    sys.exit()

# get number of model parts
num_parts = count_model_parts(dir_model)

ARCH = gguf.MODEL_ARCH.LLAMA
gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])

print("gguf: get model metadata")

block_count = hparams["num_hidden_layers"]
head_count = hparams["num_attention_heads"]

if "num_key_value_heads" in hparams:
    head_count_kv = hparams["num_key_value_heads"]
else:
    head_count_kv = head_count

if "_name_or_path" in hparams:
    hf_repo = hparams["_name_or_path"]
else:
    hf_repo = ""

if "max_sequence_length" in hparams:
    ctx_length = hparams["max_sequence_length"]
elif "max_position_embeddings" in hparams:
    ctx_length = hparams["max_position_embeddings"]
else:
    print("gguf: can not find ctx length parameter.")
    sys.exit()


gguf_writer.add_name(last_dir)
gguf_writer.add_source_hf_repo(hf_repo)
gguf_writer.add_tensor_data_layout("Meta AI original pth")
gguf_writer.add_context_length(ctx_length)
gguf_writer.add_embedding_length(hparams["hidden_size"])
gguf_writer.add_block_count(block_count)
gguf_writer.add_feed_forward_length(hparams["intermediate_size"])
gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"])
gguf_writer.add_head_count(head_count)
gguf_writer.add_head_count_kv(head_count_kv)
gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"])

if "rope_scaling" in hparams and hparams["rope_scaling"] != None and "factor" in hparams["rope_scaling"]:
    if "type" in hparams["rope_scaling"]:
        if hparams["rope_scaling"]["type"] == "linear":
            gguf_writer.add_rope_scale_linear(hparams["rope_scaling"]["factor"])


# TOKENIZATION

print("gguf: get tokenizer metadata")

tokens: List[bytes] = []
scores: List[float] = []
toktypes: List[int] = []

if Path(dir_model + "/tokenizer.model").is_file():
    # vocab type sentencepiece
    print("gguf: get sentencepiece tokenizer vocab, scores and token types")

    tokenizer = SentencePieceProcessor(dir_model + "/tokenizer.model")

    for i in range(tokenizer.vocab_size()):
        text: bytes
        score: float

        piece = tokenizer.id_to_piece(i)
        text = piece.encode("utf-8")
        score = tokenizer.get_score(i)

        toktype = 1  # default to normal token type
        if tokenizer.is_unknown(i):
            toktype = 2
        if tokenizer.is_control(i):
            toktype = 3

        # toktype = 4 is user-defined = tokens from added_tokens.json

        if tokenizer.is_unused(i):
            toktype = 5
        if tokenizer.is_byte(i):
            toktype = 6

        tokens.append(text)
        scores.append(score)
        toktypes.append(toktype)

    if Path(dir_model + "/added_tokens.json").is_file():
        with open(dir_model + "/added_tokens.json", "r", encoding="utf-8") as f:
            addtokens_json = json.load(f)

            print("gguf: get added tokens")

            for key in addtokens_json:
                tokens.append( key.encode("utf-8") )
                scores.append(-1000.0)
                toktypes.append(4)  # user-defined token type


    gguf_writer.add_tokenizer_model("llama")
    gguf_writer.add_token_list(tokens)
    gguf_writer.add_token_scores(scores)
    gguf_writer.add_token_types(toktypes)


print("gguf: get special token ids")

if Path(dir_model + "/tokenizer.json").is_file():
    # Look for special tokens in tokenizer.json if it exists

    with open(dir_model + "/tokenizer.json", "r", encoding="utf-8") as f:
        tokenizer = json.load(f)

    if "added_tokens" in tokenizer and Path(dir_model + "/tokenizer_config.json").is_file():

        with open(dir_model + "/tokenizer_config.json", "r", encoding="utf-8") as f:
            tokenizer_config = json.load(f)

        if "bos_token" in tokenizer_config and tokenizer_config["bos_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["bos_token"]["content"]:
                    gguf_writer.add_bos_token_id(key["id"])

        if "eos_token" in tokenizer_config and tokenizer_config["eos_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["eos_token"]["content"]:
                    gguf_writer.add_eos_token_id(key["id"])

        if "unk_token" in tokenizer_config and tokenizer_config["unk_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["unk_token"]["content"]:
                    gguf_writer.add_unk_token_id(key["id"])

        if "sep_token" in tokenizer_config and tokenizer_config["sep_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["sep_token"]["content"]:
                    gguf_writer.add_sep_token_id(key["id"])

        if "pad_token" in tokenizer_config and tokenizer_config["pad_token"] != None:
            for key in tokenizer["added_tokens"]:
                if key["content"] == tokenizer_config["pad_token"]["content"]:
                    gguf_writer.add_pad_token_id(key["id"])
else:
    # If no tokenizer.json: Look for special tokens in config.json

    if "bos_token_id" in hparams and hparams["bos_token_id"] != None:
        gguf_writer.add_bos_token_id(hparams["bos_token_id"])

    if "eos_token_id" in hparams and hparams["eos_token_id"] != None:
        gguf_writer.add_eos_token_id(hparams["eos_token_id"])

    if "unk_token_id" in hparams and hparams["unk_token_id"] != None:
        gguf_writer.add_unk_token_id(hparams["unk_token_id"])

    if "sep_token_id" in hparams and hparams["sep_token_id"] != None:
        gguf_writer.add_sep_token_id(hparams["sep_token_id"])

    if "pad_token_id" in hparams and hparams["pad_token_id"] != None:
        gguf_writer.add_pad_token_id(hparams["pad_token_id"])


# TENSORS

tensor_map = gguf.get_tensor_name_map(ARCH, block_count)

# tensor info
print("gguf: get tensor metadata")

if num_parts == 0:
    part_names = ("pytorch_model.bin",)
else:
    part_names = (
        f"pytorch_model-{n:05}-of-{num_parts:05}.bin" for n in range(1, num_parts + 1)
    )

for part_name in part_names:
    print("gguf: loading model part '" + part_name + "'")
    model_part = torch.load(f"{dir_model}/{part_name}", map_location="cpu")

    for name in model_part.keys():
        data = model_part[name]

        # we don't need these
        if name.endswith(".rotary_emb.inv_freq"):
            continue

        old_dtype = data.dtype

        # convert any unsupported data types to float32
        if data.dtype != torch.float16 and data.dtype != torch.float32:
            data = data.to(torch.float32)

        data = data.squeeze().numpy()

        # reverse permute these
        if name.endswith(".q_proj.weight"):
            data = reverse_hf_permute(data, head_count)
        if name.endswith(".k_proj.weight"):
            data = reverse_hf_permute(data, head_count, head_count_kv)

        # map tensor names
        if name.endswith(".weight") and name[:-7] in tensor_map:
            name = tensor_map[name[:-7]] + ".weight"
        elif name.endswith(".bias") and name[:-5] in tensor_map:
            name = tensor_map[name[:-5]] + ".bias"
        else:
            print("Cannot map tensor '" + name + "'")
            sys.exit()

        n_dims = len(data.shape)
        data_dtype = data.dtype

        # if f32 desired, convert any float16 to float32
        if ftype == 0 and data_dtype == np.float16:
            data = data.astype(np.float32)

        # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
        if ftype == 1 and data_dtype == np.float16 and n_dims == 1:
            data = data.astype(np.float32)

        # if f16 desired, convert any float32 2-dim weight tensors to float16
        if ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
            data = data.astype(np.float16)

        print(name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))

        gguf_writer.add_tensor(name, data)


print("gguf: write header")
gguf_writer.write_header_to_file()
print("gguf: write metadata")
gguf_writer.write_kv_data_to_file()
print("gguf: write tensors")
gguf_writer.write_tensors_to_file()

gguf_writer.close()


print("gguf: model successfully exported to '" + fname_out + "'")
print("")
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
import json
import os
import re
@@ -6,23 +6,22 @@ import struct
import sys
from typing import Any, Dict, Sequence, TextIO

+import numpy as np
import torch

-from convert import DATA_TYPE_TO_FTYPE, NUMPY_TYPE_TO_DATA_TYPE, DataType
+NUMPY_TYPE_TO_FTYPE: Dict[str, int] = {"float32": 0, "float16": 1}


HF_SUBLAYER_TO_GGML = {
-    "self_attn.q_proj": "attention.wq",
+    "self_attn.q_proj": "attn_q",
-    "self_attn.k_proj": "attention.wk",
+    "self_attn.k_proj": "attn_k",
-    "self_attn.v_proj": "attention.wv",
+    "self_attn.v_proj": "attn_v",
-    "self_attn.o_proj": "attention.wo",
+    "self_attn.o_proj": "attn_output",
-    "mlp.gate_proj": "feed_forward.w1",
+    "mlp.gate_proj": "ffn_gate",
-    "mlp.down_proj": "feed_forward.w2",
+    "mlp.down_proj": "ffn_down",
-    "mlp.up_proj": "feed_forward.w3",
+    "mlp.up_proj": "ffn_up",
-    "input_layernorm": "attention_norm",
+    "input_layernorm": "attn_norm",
    "post_attention_layernorm": "ffn_norm",
-    # "norm": "norm",
-    # "embed_tokens": "tok_embeddings",
-    # "lm_head": "output",
}


@@ -39,7 +38,7 @@ def translate_tensor_name(t: str) -> str:
            sys.exit(1)

        output_string = (
-            f"layers.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}"
+            f"blk.{nn}.{HF_SUBLAYER_TO_GGML[sub_layer]}.weight.lora{lora_type}"
        )
        return output_string
    else:
@@ -54,12 +53,14 @@ def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
    # https://opendelta.readthedocs.io/en/latest/modules/deltas.html says that `lora_alpha` is an int
    # but some models ship a float value instead
    # let's convert to int, but fail if lossless conversion is not possible
-    assert int(params["lora_alpha"]) == params["lora_alpha"], "cannot convert float to int losslessly"
+    assert (
+        int(params["lora_alpha"]) == params["lora_alpha"]
+    ), "cannot convert float to int losslessly"
    fout.write(struct.pack("i", int(params["lora_alpha"])))


def write_tensor_header(
-    self, name: str, shape: Sequence[int], data_type: DataType
+    self, name: str, shape: Sequence[int], data_type: np.dtype
) -> None:
    sname = name.encode("utf-8")
    fout.write(
@@ -67,7 +68,7 @@ def write_tensor_header(
            "iii",
            len(shape),
            len(sname),
-            DATA_TYPE_TO_FTYPE[NUMPY_TYPE_TO_DATA_TYPE[data_type]],
+            NUMPY_TYPE_TO_FTYPE[data_type.name],
        )
    )
    fout.write(struct.pack("i" * len(shape), *shape[::-1]))

@@ -1,13 +0,0 @@
-# Compatibility stub
-
-import argparse
-
-import convert
-
-parser = argparse.ArgumentParser(
-    description="""[DEPRECATED - use `convert.py` instead]
-    Convert a LLaMA model checkpoint to a ggml compatible file""")
-parser.add_argument('dir_model', help='directory containing the model checkpoint')
-parser.add_argument('ftype', help='file type (0: float32, 1: float16)', type=int, choices=[0, 1], default=1)
-args = parser.parse_args()
-convert.main(['--outtype', 'f16' if args.ftype == 1 else 'f32', '--', args.dir_model])

979  convert.py  Normal file → Executable file
File diff suppressed because it is too large
@@ -3,7 +3,7 @@
## Verifying that the model is running on the GPU with cuBLAS
Make sure you compiled llama with the correct env variables according to [this guide](../README.md#cublas), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
```shell
-./main -m "path/to/model.bin" -ngl 200000 -p "Please sir, may I have some "
+./main -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some "
```

When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines:
@@ -25,9 +25,9 @@ GPU: A6000 (48GB VRAM)
CPU: 7 physical cores
RAM: 32GB

-Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.ggmlv3.q4_0.bin` (30B parameters, 4bit quantization, GGML)
+Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4bit quantization, GGML)

-Run command: `./main -m "path/to/model.bin" -p "-p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`
+Run command: `./main -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`

Result:

@@ -6,27 +6,6 @@ find_package(Threads REQUIRED)

# ...

-# common
-
-set(TARGET common)
-
-add_library(${TARGET} OBJECT
-    common.h
-    common.cpp
-    console.h
-    console.cpp
-    grammar-parser.h
-    grammar-parser.cpp
-    )
-
-if (BUILD_SHARED_LIBS)
-    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
-endif()
-
-target_include_directories(${TARGET} PUBLIC .)
-target_compile_features(${TARGET} PUBLIC cxx_std_11)
-target_link_libraries(${TARGET} PRIVATE llama)
-
# examples

include_directories(${CMAKE_CURRENT_SOURCE_DIR})
@@ -12,15 +12,19 @@ usage: ./convert-llama2c-to-ggml [options]

options:
  -h, --help show this help message and exit
-  --copy-vocab-from-model FNAME model path from which to copy vocab (default 'models/ggml-vocab.bin')
+  --copy-vocab-from-model FNAME model path from which to copy vocab (default 'tokenizer.bin')
  --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model
  --llama2c-output-model FNAME model path to save the converted llama2.c model (default ak_llama_model.bin')
```

-An example command is as follows:
+An example command using a model from [karpathy/tinyllamas](https://huggingface.co/karpathy/tinyllamas) is as follows:

-`$ ./convert-llama2c-to-ggml --copy-vocab-from-model <ggml-vocab.bin> --llama2c-model <llama2.c model path> --llama2c-output-model <ggml output model path>`
+`$ ./convert-llama2c-to-ggml --copy-vocab-from-model ../llama2.c/tokenizer.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.ggmlv3.bin`

-Now you can use the model with command like:
+For now the generated model is in the legacy GGJTv3 format, so you need to convert it to gguf manually:

-`$ ./main -m <ggml output model path> -p "One day, Lily met a Shoggoth" -n 500 -c 256 -eps 1e-5`
+`$ python ./convert-llama-ggmlv3-to-gguf.py --eps 1e-5 --input stories42M.ggmlv3.bin --output stories42M.gguf.bin`
+
+Now you can use the model with a command like:
+
+`$ ./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`
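Taken together, the updated README describes a three-step pipeline. The sketch below simply strings the same example commands together, assuming the stories42M.bin checkpoint, the llama2.c tokenizer path, and the output filenames used in the text above:

```shell
# Sketch of the conversion flow described above (paths and filenames as in the README examples).
./convert-llama2c-to-ggml --copy-vocab-from-model ../llama2.c/tokenizer.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.ggmlv3.bin

# The output is legacy GGJTv3, so convert it to gguf next.
python ./convert-llama-ggmlv3-to-gguf.py --eps 1e-5 --input stories42M.ggmlv3.bin --output stories42M.gguf.bin

# Run the converted model.
./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256
```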
@@ -1,5 +1,6 @@
#include "ggml.h"
#include "llama.h"

#include <unordered_map>
#include <vector>
#include <cassert>
@@ -16,6 +17,9 @@
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

+#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
+#define LLAMA_FILE_VERSION_GGJT_V3 3
+
//////////////////////////////////////// llama2.c model structs and functions to load models, alloc memory etc.
typedef struct {
    int dim; // transformer dimension
@@ -48,10 +52,10 @@ typedef struct {
    // float* freq_cis_real; // (seq_len, dim/2)
    // float* freq_cis_imag; // (seq_len, dim/2)
    // (optional) classifier weights for the logits, on the last layer
-    //float* wcls;
+    float* wcls;
} TransformerWeights;

-void malloc_weights(TransformerWeights* w, Config* p) {
+void malloc_weights(TransformerWeights* w, Config* p, bool shared_weights) {
    // we calloc instead of malloc to keep valgrind happy
    w->token_embedding_table = new float[p->vocab_size * p->dim]();
    printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->token_embedding_table\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
@@ -85,9 +89,16 @@ void malloc_weights(TransformerWeights* w, Config* p) {

    w->rms_final_weight = new float[p->dim]();
    printf("[%s:AK] Allocating [%d] float space for w->rms_final_weight\n",__func__,p->dim);

+    if (shared_weights) {
+        w->wcls = NULL;
+    } else {
+        w->wcls = new float[p->vocab_size * p->dim]();
+        printf("[%s:AK] Allocating [%d] x [%d] = [%d] float space for w->wcls\n",__func__,p->vocab_size , p->dim, p->vocab_size * p->dim);
+    }
}

-int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) {
+int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f, bool shared_weights) {
    if (fread(w->token_embedding_table, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
    if (fread(w->rms_att_weight, sizeof(float), p->n_layers * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim)) return 1;
    if (fread(w->wq, sizeof(float), p->n_layers * p->dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->dim)) return 1;
@@ -99,6 +110,22 @@ int checkpoint_init_weights(TransformerWeights *w, Config* p, FILE* f) {
    if (fread(w->w2, sizeof(float), p->n_layers * p->hidden_dim * p->dim, f) != static_cast<size_t>(p->n_layers * p->hidden_dim * p->dim)) return 1;
    if (fread(w->w3, sizeof(float), p->n_layers * p->dim * p->hidden_dim, f) != static_cast<size_t>(p->n_layers * p->dim * p->hidden_dim)) return 1;
    if (fread(w->rms_final_weight, sizeof(float), p->dim, f) != static_cast<size_t>(p->dim)) return 1;

+    // Skip freq_cis_real & freq_cis_imag
+    int head_size = p->dim / p->n_heads;
+    fseek(f, p->seq_len * head_size * sizeof(float), SEEK_CUR);
+
+    if (!shared_weights && fread(w->wcls, sizeof(float), p->vocab_size * p->dim, f) != static_cast<size_t>(p->vocab_size * p->dim)) return 1;
+
+    // Check we didn't forget to read anything
+    auto curr = ftell(f);
+    fseek(f, 0, SEEK_END);
+    auto end = ftell(f);
+    if (curr != end) {
+        printf("Error: failed to read the checkpoint file to the end (curr = %ld, end = %ld)\n", curr, end);
+        return 1;
+    }
+
    return 0;
}

@@ -114,6 +141,7 @@ void free_weights(TransformerWeights* w) {
    delete w->w2;
    delete w->w3;
    delete w->rms_final_weight;
+    if (w->wcls) delete w->wcls;
}

void print_sample_weights(TransformerWeights *w){
@@ -130,6 +158,7 @@ void print_sample_weights(TransformerWeights *w){
    printf("%f\n", w->w2[0]);
    printf("%f\n", w->w3[0]);
    printf("%f\n", w->rms_att_weight[0]);
+    if (w->wcls) printf("%f\n", w->wcls[0]);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////

@@ -138,14 +167,16 @@ void print_sample_weights(TransformerWeights *w){
struct llama_vocab {
    using id = int32_t;
    using token = std::string;
+    using ttype = llama_token_type;

-    struct token_score {
-        token tok;
+    struct token_data {
+        token text;
        float score;
+        ttype type;
    };

    std::unordered_map<token, id> token_to_id;
-    std::vector<token_score> id_to_token;
+    std::vector<token_data> id_to_token;
};

struct my_llama_hparams {
@@ -502,49 +533,51 @@ bool is_ggml_file(const char *filename) {
        return false;
    }
    uint32_t magic = file.read_u32();
-    return magic == LLAMA_FILE_MAGIC;
+    return magic == GGUF_MAGIC;
}

void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab) {
-    // heuristic to infer whether vocab is from ggml or from llama2.c vocabulary
-    if (is_ggml_file(filename)) {
-        struct llama_context_params llama_params = llama_context_default_params();
-        llama_params.vocab_only = true;
-        struct llama_model * lmodel = llama_load_model_from_file(filename, llama_params);
-        struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
-        std::vector<const char *> strings;
-        std::vector<float> scores;
-        int n_vocab = llama_n_vocab(lctx);
-        strings.resize(n_vocab, NULL);
-        scores.resize(n_vocab, 0);
-        n_vocab = llama_get_vocab(lctx, strings.data(), scores.data(), n_vocab);
-        GGML_ASSERT(n_vocab == llama_n_vocab(lctx));
-        vocab->id_to_token.resize(n_vocab);
-        for (int i=0; i<n_vocab; ++i) {
-            std::string tok = std::string(strings[i]);
-            float score = scores[i];
-            vocab->id_to_token[i].tok = tok;
-            vocab->id_to_token[i].score = score;
-            vocab->token_to_id.emplace(tok, i);
-        }
-        llama_free(lctx);
-        llama_free_model(lmodel);
-    } else { // assume llama2.c vocabulary
+#pragma message("TODO: implement reading vocabulary using gguf")
+    // // heuristic to infer whether vocab is from ggml or from llama2.c vocabulary
+    // if (is_ggml_file(filename)) {
+    //
+    //     struct llama_context_params llama_params = llama_context_default_params();
+    //     llama_params.vocab_only = true;
+    //
+    //     struct llama_model * lmodel = llama_load_model_from_file(filename, llama_params);
+    //     struct llama_context * lctx = llama_new_context_with_model(lmodel, llama_params);
+    //
+    //     const int n_vocab = llama_n_vocab(lctx);
+    //     vocab->id_to_token.resize(n_vocab);
+    //     for (int i=0; i<n_vocab; ++i) {
+    //         vocab->id_to_token[i].text = llama_token_get_text(lctx, i);
+    //         vocab->id_to_token[i].score = llama_token_get_score(lctx, i);
+    //         vocab->id_to_token[i].type = llama_token_get_type(lctx, i);
+    //         vocab->token_to_id.emplace(vocab->id_to_token[i].text, i);
+    //     }
+    //     llama_free(lctx);
+    //     llama_free_model(lmodel);
+    // } else
+    { // assume llama2.c vocabulary
        printf("Assuming llama2.c vocabulary since %s is not a ggml file\n", filename);
        llama_file file(filename, "rb");
-        uint32_t n_vocab = config->vocab_size;
+        const int n_vocab = config->vocab_size;
        /* uint32_t max_token_length = */ file.read_u32(); // unused
        vocab->id_to_token.resize(n_vocab);
-        for (uint32_t i=0; i<n_vocab; ++i) {
+        for (int i=0; i<n_vocab; ++i) {
            float_t score = file.read_f32();
            uint32_t len = file.read_u32();
-            std::string tok = file.read_string(len);
-            vocab->id_to_token[i].tok = tok;
+            std::string text = file.read_string(len);
+            // Special-case handling of <0xXX> single byte tokens.
+            char byte_val;
+            if (sscanf(text.c_str(), "<0x%02hhX>", &byte_val) == 1) {
+                char cstr[2] = { byte_val, 0 };
+                text = cstr;
+            }
+            vocab->id_to_token[i].text = text;
            vocab->id_to_token[i].score = score;
-            vocab->token_to_id.emplace(tok, i);
+            vocab->id_to_token[i].type = LLAMA_TOKEN_TYPE_UNDEFINED;
+            vocab->token_to_id.emplace(text, i);
        }
    }
}
@@ -590,9 +623,11 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
    if (file.fp == NULL) {
        return;
    }

+#pragma message("TODO: implement file saving using gguf")
    // write_magic
-    file.write_u32(LLAMA_FILE_MAGIC); // magic
-    file.write_u32(LLAMA_FILE_VERSION); // version
+    file.write_u32(LLAMA_FILE_MAGIC_GGJT); // magic
+    file.write_u32(LLAMA_FILE_VERSION_GGJT_V3); // version
    // write_hparams
    file.write_u32(model->hparams.n_vocab);
    file.write_u32(model->hparams.n_embd);
@@ -605,17 +640,17 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
    // write_vocab - for now we are just writing the existing BPE voc. assuming karpathy's vocabulary is the same. idk.
    uint32_t n_vocab = model->hparams.n_vocab;
    for (uint32_t i = 0; i < n_vocab; i++) {
-        const auto & token_score = vocab->id_to_token.at(i);
-        file.write_u32((uint32_t) token_score.tok.size());
-        file.write_raw(token_score.tok.data(), token_score.tok.size());
-        file.write_raw(&token_score.score, sizeof(token_score.score));
+        const auto & token_data = vocab->id_to_token.at(i);
+        file.write_u32((uint32_t) token_data.text.size());
+        file.write_raw(token_data.text.data(), token_data.text.size());
+        file.write_raw(&token_data.score, sizeof(token_data.score));
    }

    // stuff AK weights into GG weights one by one.
    // w->token_embedding_table -> model->tok_embeddings
    // float* -> struct ggml_tensor
    stuff_karpathy_weights_into_gg(model->tok_embeddings, w->token_embedding_table);
-    stuff_karpathy_weights_into_gg(model->output, w->token_embedding_table);
+    stuff_karpathy_weights_into_gg(model->output, w->wcls ? w->wcls : w->token_embedding_table);

    stuff_karpathy_weights_into_gg(model->norm, w->rms_final_weight);
    //print_row(model->norm, 0);
@@ -663,7 +698,7 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod

struct train_params get_default_train_params() {
    struct train_params params;
-    params.fn_vocab_model = "models/ggml-vocab.bin";
+    params.fn_vocab_model = "tokenizer.bin";
    params.fn_llama2c_output_model = "ak_llama_model.bin";
    params.fn_train_data = "shakespeare.txt";
    params.fn_checkpoint_in = "checkpoint.bin";
@@ -716,7 +751,7 @@ void print_usage(int /*argc*/, char ** argv, const struct train_params * params)
    fprintf(stderr, "\n");
    fprintf(stderr, "options:\n");
    fprintf(stderr, " -h, --help show this help message and exit\n");
-    fprintf(stderr, " --copy-vocab-from-model FNAME llama2.c vocabulary or ggml model path from which to copy vocab (default '%s')\n", params->fn_vocab_model);
+    fprintf(stderr, " --copy-vocab-from-model FNAME llama2.c vocabulary or ggmlv3 model path from which to copy vocab (default '%s')\n", params->fn_vocab_model);
    fprintf(stderr, " --llama2c-model FNAME [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
    fprintf(stderr, " --llama2c-output-model FNAME model path to save the converted llama2.c model (default %s')\n", params->fn_llama2c_output_model);
    fprintf(stderr, "\n");
@@ -789,9 +824,12 @@ int main(int argc, char ** argv) {
        if (!file) { printf("Unable to open the checkpoint file %s!\n", params.fn_llama2c_model); return 1; }
        // read in the config header
        if(fread(&config, sizeof(Config), 1, file) != 1) { return 1; }
+        auto shared_weights = config.vocab_size > 0;
+        config.vocab_size = abs(config.vocab_size);
+
        // read in the Transformer weights
-        malloc_weights(&weights, &config);
-        if(checkpoint_init_weights(&weights, &config, file)) { return 1; }
+        malloc_weights(&weights, &config, shared_weights);
+        if(checkpoint_init_weights(&weights, &config, file, shared_weights)) { return 1; }
        fclose(file);
    }

@@ -167,7 +167,7 @@ llama_token sampling_id(struct MyModel* mymodel) {
    llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };

    // TODO: Apply penalties
-    // float nl_logit = logits[llama_token_nl()];
+    // float nl_logit = logits[llama_token_nl(ctx)];
    // auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
    // llama_sample_repetition_penalty(ctx, &candidates_p,
    // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
@@ -176,7 +176,7 @@ llama_token sampling_id(struct MyModel* mymodel) {
    // last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
    // last_n_repeat, alpha_frequency, alpha_presence);
    // if (!penalize_nl) {
-    // logits[llama_token_nl()] = nl_logit;
+    // logits[llama_token_nl(ctx)] = nl_logit;
    // }

    if (temp <= 0) {
@@ -211,7 +211,7 @@ const char * sampling(struct MyModel * mymodel) {
    llama_context * ctx = mymodel->ctx;
    int id = sampling_id(mymodel);
    static std::string ret;
-    if (id == llama_token_eos()) {
+    if (id == llama_token_eos(ctx)) {
        ret = "</s>";
    } else {
        ret = llama_token_to_str(ctx, id);

1  examples/embd-input/embd_input.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import ctypes
from ctypes import cdll, c_char_p, c_void_p, POINTER, c_float, c_int
import numpy as np

1  examples/embd-input/llava.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))

1  examples/embd-input/minigpt4.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))

1  examples/embd-input/panda_gpt.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
@@ -67,17 +67,25 @@ int main(int argc, char ** argv) {
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
+            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
        }
        fprintf(stderr, "\n");
    }

-    if (params.embedding){
-        if (embd_inp.size() > 0) {
-            if (llama_eval(ctx, embd_inp.data(), embd_inp.size(), n_past, params.n_threads)) {
+    if (embd_inp.size() > (size_t)params.n_ctx) {
+        fprintf(stderr, "%s: error: prompt is longer than the context window (%zu tokens, n_ctx = %d)\n",
+                __func__, embd_inp.size(), params.n_ctx);
+        return 1;
+    }
+
+    while (!embd_inp.empty()) {
+        int n_tokens = std::min(params.n_batch, (int) embd_inp.size());
+        if (llama_eval(ctx, embd_inp.data(), n_tokens, n_past, params.n_threads)) {
            fprintf(stderr, "%s : failed to eval\n", __func__);
            return 1;
        }
+        n_past += n_tokens;
+        embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_tokens);
    }

    const int n_embd = llama_n_embd(ctx);
@@ -87,7 +95,6 @@ int main(int argc, char ** argv) {
        printf("%f ", embeddings[i]);
    }
    printf("\n");
-    }

    llama_print_timings(ctx);
    llama_free(ctx);
246  examples/gguf/gguf.cpp  Normal file
@@ -0,0 +1,246 @@
#include "ggml.h"
#include "llama.h"

#include <cstdio>
#include <cinttypes>
#include <string>
#include <sstream>
#include <fstream>
#include <vector>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

template<typename T>
static std::string to_string(const T & val) {
    std::stringstream ss;
    ss << val;
    return ss.str();
}

bool gguf_ex_write(const std::string & fname) {
    struct gguf_context * ctx = gguf_init_empty();

    gguf_set_val_u8 (ctx, "some.parameter.uint8", 0x12);
    gguf_set_val_i8 (ctx, "some.parameter.int8", -0x13);
    gguf_set_val_u16 (ctx, "some.parameter.uint16", 0x1234);
    gguf_set_val_i16 (ctx, "some.parameter.int16", -0x1235);
    gguf_set_val_u32 (ctx, "some.parameter.uint32", 0x12345678);
    gguf_set_val_i32 (ctx, "some.parameter.int32", -0x12345679);
    gguf_set_val_f32 (ctx, "some.parameter.float32", 0.123456789f);
    gguf_set_val_bool(ctx, "some.parameter.bool", true);
    gguf_set_val_str (ctx, "some.parameter.string", "hello world");

    gguf_set_arr_data(ctx, "some.parameter.arr.i16", GGUF_TYPE_INT16, std::vector<int16_t>{ 1, 2, 3, 4, }.data(), 4);
    gguf_set_arr_data(ctx, "some.parameter.arr.f32", GGUF_TYPE_FLOAT32, std::vector<float>{ 3.145f, 2.718f, 1.414f, }.data(), 3);
    gguf_set_arr_str (ctx, "some.parameter.arr.str", std::vector<const char *>{ "hello", "world", "!" }.data(), 3);

    struct ggml_init_params params = {
        /*.mem_size =*/ 128ull*1024ull*1024ull,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc =*/ false,
    };

    struct ggml_context * ctx_data = ggml_init(params);

    const int n_tensors = 10;

    // tensor infos
    for (int i = 0; i < n_tensors; ++i) {
        const std::string name = "tensor_" + to_string(i);

        int64_t ne[GGML_MAX_DIMS] = { 1 };
        int32_t n_dims = rand() % GGML_MAX_DIMS + 1;

        for (int j = 0; j < n_dims; ++j) {
            ne[j] = rand() % 10 + 1;
        }

        struct ggml_tensor * cur = ggml_new_tensor(ctx_data, GGML_TYPE_F32, n_dims, ne);
        ggml_set_name(cur, name.c_str());

        {
            float * data = (float *) cur->data;
            for (int j = 0; j < ggml_nelements(cur); ++j) {
                data[j] = 100 + i;
            }
        }

        gguf_add_tensor(ctx, cur);
    }

    gguf_write_to_file(ctx, fname.c_str(), false);

    fprintf(stdout, "%s: wrote file '%s;\n", __func__, fname.c_str());

    ggml_free(ctx_data);
    gguf_free(ctx);

    return true;
}

// just read tensor info
bool gguf_ex_read_0(const std::string & fname) {
    struct gguf_init_params params = {
        /*.no_alloc = */ false,
        /*.ctx = */ NULL,
    };

    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);

    fprintf(stdout, "%s: version: %d\n", __func__, gguf_get_version(ctx));
    fprintf(stdout, "%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
    fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));

    // kv
    {
        const int n_kv = gguf_get_n_kv(ctx);

        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);

        for (int i = 0; i < n_kv; ++i) {
            const char * key = gguf_get_key(ctx, i);

            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
        }
    }

    // find kv string
    {
        const char * findkey = "some.parameter.string";

        const int keyidx = gguf_find_key(ctx, findkey);
        if (keyidx == -1) {
            fprintf(stdout, "%s: find key: %s not found.\n", __func__, findkey);
        } else {
            const char * key_value = gguf_get_val_str(ctx, keyidx);
            fprintf(stdout, "%s: find key: %s found, kv[%d] value = %s\n", __func__, findkey, keyidx, key_value);
        }
    }

    // tensor info
    {
        const int n_tensors = gguf_get_n_tensors(ctx);

        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);

        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name (ctx, i);
            const size_t offset = gguf_get_tensor_offset(ctx, i);

            fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
        }
    }

    gguf_free(ctx);

    return true;
}

// read and create ggml_context containing the tensors and their data
bool gguf_ex_read_1(const std::string & fname) {
    struct ggml_context * ctx_data = NULL;

    struct gguf_init_params params = {
        /*.no_alloc = */ false,
        /*.ctx = */ &ctx_data,
    };

    struct gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);

    fprintf(stdout, "%s: version: %d\n", __func__, gguf_get_version(ctx));
    fprintf(stdout, "%s: alignment: %zu\n", __func__, gguf_get_alignment(ctx));
    fprintf(stdout, "%s: data offset: %zu\n", __func__, gguf_get_data_offset(ctx));

    // kv
    {
        const int n_kv = gguf_get_n_kv(ctx);

        fprintf(stdout, "%s: n_kv: %d\n", __func__, n_kv);

        for (int i = 0; i < n_kv; ++i) {
            const char * key = gguf_get_key(ctx, i);

            fprintf(stdout, "%s: kv[%d]: key = %s\n", __func__, i, key);
        }
    }

    // tensor info
    {
        const int n_tensors = gguf_get_n_tensors(ctx);

        fprintf(stdout, "%s: n_tensors: %d\n", __func__, n_tensors);

        for (int i = 0; i < n_tensors; ++i) {
            const char * name = gguf_get_tensor_name (ctx, i);
            const size_t offset = gguf_get_tensor_offset(ctx, i);

            fprintf(stdout, "%s: tensor[%d]: name = %s, offset = %zu\n", __func__, i, name, offset);
        }
    }

    // data
    {
        const int n_tensors = gguf_get_n_tensors(ctx);

        for (int i = 0; i < n_tensors; ++i) {
            fprintf(stdout, "%s: reading tensor %d data\n", __func__, i);

            const char * name = gguf_get_tensor_name(ctx, i);

            struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);

            fprintf(stdout, "%s: tensor[%d]: n_dims = %d, name = %s, data = %p\n", __func__, i, cur->n_dims, cur->name, cur->data);

            // print first 10 elements
            const float * data = (const float *) cur->data;

            printf("%s data[:10] : ", name);
            for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) {
                printf("%f ", data[j]);
            }
            printf("\n\n");

            // check data
            {
                const float * data = (const float *) cur->data;
                for (int j = 0; j < ggml_nelements(cur); ++j) {
                    if (data[j] != 100 + i) {
                        fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
                        return false;
                    }
                }
            }
        }
    }

    fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data));

    ggml_free(ctx_data);
    gguf_free(ctx);

    return true;
}

int main(int argc, char ** argv) {
    if (argc < 3) {
        fprintf(stdout, "usage: %s data.gguf r|w\n", argv[0]);
        return -1;
    }

    const std::string fname(argv[1]);
    const std::string mode (argv[2]);

    GGML_ASSERT((mode == "r" || mode == "w") && "mode must be r or w");

    if (mode == "w") {
        GGML_ASSERT(gguf_ex_write(fname) && "failed to write gguf file");
    } else if (mode == "r") {
        GGML_ASSERT(gguf_ex_read_0(fname) && "failed to read gguf file");
        GGML_ASSERT(gguf_ex_read_1(fname) && "failed to read gguf file");
    }

    return 0;
}
1133  examples/gptneox-wip/cmpnct_gpt2bpe.hpp  Normal file
File diff suppressed because it is too large

1111  examples/gptneox-wip/falcon-main.cpp  Normal file
File diff suppressed because it is too large

1082  examples/gptneox-wip/gptneox-main.cpp  Normal file
File diff suppressed because it is too large

1  examples/jeopardy/graph.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import matplotlib.pyplot as plt
import os
import csv

0  examples/jeopardy/jeopardy.sh  Normal file → Executable file

1  examples/json-schema-to-grammar.py  Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
import argparse
import json
import re
@@ -148,7 +148,7 @@ struct cmd_params {
};

static const cmd_params cmd_params_defaults = {
-    /* model */ {"models/7B/ggml-model-q4_0.bin"},
+    /* model */ {"models/7B/ggml-model-q4_0.gguf"},
    /* n_prompt */ {512},
    /* n_gen */ {128},
    /* n_batch */ {512},
@@ -179,12 +179,12 @@ static void print_usage(int /* argc */, char ** argv) {
    fprintf(stdout, " -mg i, --main-gpu <n> (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
    fprintf(stdout, " -lv, --low-vram <0|1> (default: %s)\n", join(cmd_params_defaults.low_vram, ",").c_str());
    fprintf(stdout, " -mmq, --mul-mat-q <0|1> (default: %s)\n", join(cmd_params_defaults.mul_mat_q, ",").c_str());
-    fprintf(stdout, " -ts, --tensor_split <ts> \n");
+    fprintf(stdout, " -ts, --tensor_split <ts0/ts1/..> \n");
    fprintf(stdout, " -r, --repetitions <n> (default: %d)\n", cmd_params_defaults.reps);
-    fprintf(stdout, " -o, --output <csv|json|md|sql> (default: %s)\n", cmd_params_defaults.output_format == CSV ? "csv" : cmd_params_defaults.output_format == JSON ? "json" : "md");
+    fprintf(stdout, " -o, --output <csv|json|md|sql> (default: %s)\n", cmd_params_defaults.output_format == CSV ? "csv" : cmd_params_defaults.output_format == JSON ? "json" : cmd_params_defaults.output_format == MARKDOWN ? "md" : "sql");
    fprintf(stdout, " -v, --verbose (default: %s)\n", cmd_params_defaults.verbose ? "1" : "0");
    fprintf(stdout, "\n");
-    fprintf(stdout, "Multiple values can be given for each parameter by separating them with ',' or by repeating the parameter.\n");
+    fprintf(stdout, "Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.\n");

}

@@ -606,6 +606,8 @@ const std::string test::cpu_info = get_cpu_info();
const std::string test::gpu_info = get_gpu_info();

struct printer {
+    virtual ~printer() {}
+
    FILE * fout;
    virtual void print_header(const cmd_params & params) { (void) params; };
    virtual void print_test(const test & t) = 0;
@@ -726,7 +728,7 @@ struct markdown_printer : public printer {
        if (!is_cpu_backend) {
            fields.push_back("n_gpu_layers");
        }
-        if (params.n_batch.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) {
+        if (params.n_threads.size() > 1 || params.n_threads != cmd_params_defaults.n_threads || is_cpu_backend) {
            fields.push_back("n_threads");
        }
        if (params.n_batch.size() > 1 || params.n_batch != cmd_params_defaults.n_batch) {
@@ -849,7 +851,7 @@ struct sql_printer : public printer {
};

static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
-    std::vector<llama_token> tokens(n_batch, llama_token_bos());
+    std::vector<llama_token> tokens(n_batch, llama_token_bos(ctx));
    int n_processed = 0;
    while (n_processed < n_prompt) {
        int n_tokens = std::min(n_prompt - n_processed, n_batch);
@@ -859,7 +861,7 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat
}

static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
-    llama_token token = llama_token_bos();
+    llama_token token = llama_token_bos(ctx);
    for (int i = 0; i < n_gen; i++) {
        llama_eval(ctx, &token, 1, n_past + i, n_threads);
    }
@@ -288,6 +288,10 @@ These options help improve the performance and memory usage of the LLaMA models.

- `--prompt-cache FNAME`: Specify a file to cache the model state after the initial prompt. This can significantly speed up the startup time when you're using longer prompts. The file is created during the first run and is reused and updated in subsequent runs. **Note**: Restoring a cached prompt does not imply restoring the exact state of the session at the point it was saved. So even when specifying a specific seed, you are not guaranteed to get the same sequence of tokens as the original generation.

+### Grammars
+
+- `--grammar GRAMMAR`, `--grammar-file FILE`: Specify a grammar (defined inline or in a file) to constrain model output to a specific format. For example, you could force the model to output JSON or to speak only in emojis. See the [GBNF guide](../../grammars/README.md) for details on the syntax.
+
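As a quick illustration of the new `--grammar-file` option (a sketch only; the grammar file name, its contents, and the prompt below are hypothetical examples, not taken from the repository):

```shell
# Hypothetical answer.gbnf: constrain generation to a single "yes" or "no".
cat > answer.gbnf <<'EOF'
root ::= "yes" | "no"
EOF

# Use the grammar to force a constrained answer.
./main -m models/7B/ggml-model-q4_0.gguf --grammar-file answer.gbnf -p "Is the sky blue? Answer: "
```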
### Quantization

For information about 4-bit quantization, which can significantly improve performance and reduce memory usage, please refer to llama.cpp's primary [README](../../README.md#prepare-data--run).

@@ -143,7 +143,7 @@ int main(int argc, char ** argv) {
    {
        fprintf(stderr, "%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx);

-        const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
+        const std::vector<llama_token> tmp(params.n_batch, llama_token_bos(ctx));
        llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads);
    }

@@ -191,16 +191,17 @@ int main(int argc, char ** argv) {

    // tokenize the prompt
    std::vector<llama_token> embd_inp;

-    // Add a space in front of the first character to match OG llama tokenizer behavior
-    params.prompt.insert(0, 1, ' ');
-
    if (params.interactive_first || params.instruct || !params.prompt.empty() || session_tokens.empty()) {
        embd_inp = ::llama_tokenize(ctx, params.prompt, true);
    } else {
        embd_inp = session_tokens;
    }

+    // Should not run without any tokens
+    if (embd_inp.empty()) {
+        embd_inp.push_back(llama_token_bos(ctx));
+    }
+
    // Tokenize negative prompt
    std::vector<llama_token> guidance_inp;
    int guidance_offset = 0;
@@ -270,15 +271,12 @@ int main(int argc, char ** argv) {
        params.interactive = true;
    }

-    // determine newline token
-    auto llama_token_newline = ::llama_tokenize(ctx, "\n", false);
-
    if (params.verbose_prompt) {
        fprintf(stderr, "\n");
        fprintf(stderr, "%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        fprintf(stderr, "%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
-            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]));
+            fprintf(stderr, "%6d -> '%s'\n", embd_inp[i], llama_token_to_str(ctx, embd_inp[i]).c_str());
        }

        if (ctx_guidance) {
@@ -286,14 +284,14 @@ int main(int argc, char ** argv) {
            fprintf(stderr, "%s: negative prompt: '%s'\n", __func__, params.cfg_negative_prompt.c_str());
            fprintf(stderr, "%s: number of tokens in negative prompt = %zu\n", __func__, guidance_inp.size());
            for (int i = 0; i < (int) guidance_inp.size(); i++) {
-                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]));
+                fprintf(stderr, "%6d -> '%s'\n", guidance_inp[i], llama_token_to_str(ctx, guidance_inp[i]).c_str());
            }
        }

        if (params.n_keep > 0) {
            fprintf(stderr, "%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
-                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]));
+                fprintf(stderr, "%s", llama_token_to_str(ctx, embd_inp[i]).c_str());
            }
            fprintf(stderr, "'\n");
        }
@@ -311,7 +309,7 @@ int main(int argc, char ** argv) {
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
-        SetConsoleCtrlHandler(static_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
+        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif

        fprintf(stderr, "%s: interactive mode on.\n", __func__);
@@ -352,10 +350,9 @@ int main(int argc, char ** argv) {
    fprintf(stderr, "\n");

    {
-        auto it = params.logit_bias.find(llama_token_eos());
+        auto it = params.logit_bias.find(llama_token_eos(ctx));
        if (it != params.logit_bias.end() && it->second == -INFINITY) {
-            fprintf(stderr,
-                "%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__);
+            fprintf(stderr, "%s: warning: EOS token is disabled, which will cause most grammars to fail\n", __func__);
        }
    }

@@ -405,7 +402,7 @@ int main(int argc, char ** argv) {

    // do one empty run to warm up the model
    {
-        const std::vector<llama_token> tmp = { llama_token_bos(), };
+        const std::vector<llama_token> tmp = { llama_token_bos(ctx), };
        llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
        llama_reset_timings(ctx);
    }
@@ -589,7 +586,7 @@ int main(int argc, char ** argv) {
            }

            // Apply penalties
-            float nl_logit = logits[llama_token_nl()];
+            float nl_logit = logits[llama_token_nl(ctx)];
            auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
            llama_sample_repetition_penalty(ctx, &candidates_p,
                last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
@@ -598,7 +595,7 @@ int main(int argc, char ** argv) {
                last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
                last_n_repeat, alpha_frequency, alpha_presence);
            if (!penalize_nl) {
-                logits[llama_token_nl()] = nl_logit;
+                logits[llama_token_nl(ctx)] = nl_logit;
            }

            if (grammar != NULL) {
@@ -672,7 +669,7 @@ int main(int argc, char ** argv) {
        // display text
        if (input_echo) {
            for (auto id : embd) {
-                printf("%s", llama_token_to_str(ctx, id));
+                printf("%s", llama_token_to_str(ctx, id).c_str());
            }
            fflush(stdout);
        }
@@ -714,7 +711,7 @@ int main(int argc, char ** argv) {
            }

            // deal with end of text token in interactive mode
-            if (last_n_tokens.back() == llama_token_eos()) {
+            if (last_n_tokens.back() == llama_token_eos(ctx)) {
                if (params.interactive) {
                    if (params.antiprompt.size() != 0) {
                        // tokenize and inject first reverse prompt
@@ -739,7 +736,7 @@ int main(int argc, char ** argv) {
            }

            if (params.input_prefix_bos) {
-                embd_inp.push_back(llama_token_bos());
+                embd_inp.push_back(llama_token_bos(ctx));
            }

            std::string buffer;
@@ -795,8 +792,7 @@ int main(int argc, char ** argv) {
            if (grammar != NULL) {
|
||||||
llama_grammar_free(grammar);
|
llama_grammar_free(grammar);
|
||||||
|
|
||||||
std::vector<const llama_grammar_element *> grammar_rules(
|
std::vector<const llama_grammar_element *> grammar_rules( parsed_grammar.c_rules());
|
||||||
parsed_grammar.c_rules());
|
|
||||||
grammar = llama_grammar_init(
|
grammar = llama_grammar_init(
|
||||||
grammar_rules.data(), grammar_rules.size(),
|
grammar_rules.data(), grammar_rules.size(),
|
||||||
parsed_grammar.symbol_ids.at("root"));
|
parsed_grammar.symbol_ids.at("root"));
|
||||||
@ -807,7 +803,7 @@ int main(int argc, char ** argv) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// end of text token
|
// end of text token
|
||||||
if (!embd.empty() && embd.back() == llama_token_eos() && !(params.instruct || params.interactive)) {
|
if (!embd.empty() && embd.back() == llama_token_eos(ctx) && !(params.instruct || params.interactive)) {
|
||||||
fprintf(stderr, " [end of text]\n");
|
fprintf(stderr, " [end of text]\n");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
1
examples/make-ggml.py
Normal file → Executable file
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 """
 This script converts Hugging Face llama models to GGML and quantizes them.
 
@@ -2,7 +2,7 @@
 //
 // - First, export a LLaMA graph:
 //
-// $ ./bin/main -m ../models/7B/ggml-model-q4_0.bin --export
+// $ ./bin/main -m ../models/7B/ggml-model-q4_0.gguf --export
 //
 // - Run this tool to evaluate the exported graph:
 //
@@ -27,7 +27,121 @@ std::vector<float> softmax(const std::vector<float>& logits) {
 return probs;
 }
 
+void perplexity_v2(llama_context * ctx, const gpt_params & params) {
+
+// Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
+// Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+// Output: `perplexity: 13.5106 [114/114]`
+// BOS tokens will be added for each chunk before eval
+
+if (params.ppl_stride <= 0) {
+fprintf(stderr, "%s: stride is %d but must be greater than zero!\n",__func__,params.ppl_stride);
+return;
+}
+auto tokens = ::llama_tokenize(ctx, params.prompt, true);
+
+const int calc_chunk = params.n_ctx;
+
+fprintf(stderr, "%s: have %zu tokens. Calculation chunk = %d\n", __func__, tokens.size(), calc_chunk);
+
+if (int(tokens.size()) <= calc_chunk) {
+fprintf(stderr, "%s: there are only %zu tokens, this is not enough for a context size of %d and stride %d\n",__func__,
+tokens.size(), params.n_ctx, params.ppl_stride);
+return;
+}
+
+const int n_chunk_max = (tokens.size() - calc_chunk + params.ppl_stride - 1) / params.ppl_stride;
+
+const int n_chunk = params.n_chunks < 0 ? n_chunk_max : std::min(params.n_chunks, n_chunk_max);
+const int n_vocab = llama_n_vocab(ctx);
+const int n_batch = params.n_batch;
+
+int count = 0;
+double nll = 0.0;
+
+fprintf(stderr, "%s: calculating perplexity over %d chunks, batch_size=%d\n", __func__, n_chunk, n_batch);
+
+for (int i = 0; i < n_chunk; ++i) {
+const int start = i * params.ppl_stride;
+const int end = start + calc_chunk;
+
+const int num_batches = (calc_chunk + n_batch - 1) / n_batch;
+//fprintf(stderr, "%s: evaluating %d...%d using %d batches\n", __func__, start, end, num_batches);
+
+std::vector<float> logits;
+
+const auto t_start = std::chrono::high_resolution_clock::now();
+
+for (int j = 0; j < num_batches; ++j) {
+const int batch_start = start + j * n_batch;
+const int batch_size = std::min(end - batch_start, n_batch);
+
+//fprintf(stderr, " Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
+if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
+//fprintf(stderr, "%s : failed to eval\n", __func__);
+return;
+}
+
+// save original token and restore it after eval
+const auto token_org = tokens[batch_start];
+
+// add BOS token for the first batch of each chunk
+if (j == 0) {
+tokens[batch_start] = llama_token_bos(ctx);
+}
+
+const auto batch_logits = llama_get_logits(ctx);
+logits.insert(logits.end(), batch_logits, batch_logits + batch_size * n_vocab);
+
+if (j == 0) {
+tokens[batch_start] = token_org;
+}
+}
+
+const auto t_end = std::chrono::high_resolution_clock::now();
+
+if (i == 0) {
+const float t_total = std::chrono::duration<float>(t_end - t_start).count();
+fprintf(stderr, "%s: %.2f seconds per pass - ETA ", __func__, t_total);
+int total_seconds = (int)(t_total * n_chunk);
+if (total_seconds >= 60*60) {
+fprintf(stderr, "%d hours ", total_seconds / (60*60));
+total_seconds = total_seconds % (60*60);
+}
+fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);
+}
+
+//fprintf(stderr, "%s: using tokens %d...%d\n",__func__,params.n_ctx - params.ppl_stride + start, params.n_ctx + start);
+for (int j = params.n_ctx - params.ppl_stride - 1; j < params.n_ctx - 1; ++j) {
+
+// Calculate probability of next token, given the previous ones.
+const std::vector<float> tok_logits(
+logits.begin() + (j + 0) * n_vocab,
+logits.begin() + (j + 1) * n_vocab);
+
+const float prob = softmax(tok_logits)[tokens[start + j + 1]];
+
+nll += -std::log(prob);
+++count;
+}
+// perplexity is e^(average negative log-likelihood)
+if (params.ppl_output_type == 0) {
+printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+} else {
+printf("%8d %.4lf\n", i*params.ppl_stride, std::exp(nll / count));
+}
+fflush(stdout);
+}
+printf("\n");
+}
+
 void perplexity(llama_context * ctx, const gpt_params & params) {
+
+if (params.ppl_stride > 0) {
+perplexity_v2(ctx, params);
+return;
+}
+
 // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
 // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
 // Output: `perplexity: 13.5106 [114/114]`
@@ -64,7 +178,7 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
 
 // add BOS token for the first batch of each chunk
 if (j == 0) {
-tokens[batch_start] = llama_token_bos();
+tokens[batch_start] = llama_token_bos(ctx);
 }
 
 if (llama_eval(ctx, tokens.data() + batch_start, batch_size, j * n_batch, params.n_threads)) {
@@ -116,7 +230,11 @@ void perplexity(llama_context * ctx, const gpt_params & params) {
 ++count;
 }
 // perplexity is e^(average negative log-likelihood)
+if (params.ppl_output_type == 0) {
 printf("[%d]%.4lf,", i + 1, std::exp(nll / count));
+} else {
+printf("%8d %.4lf\n", i*params.n_ctx, std::exp(nll / count));
+}
 fflush(stdout);
 }
 printf("\n");
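For reference, the number both perplexity paths print is the exponential of the average negative log-likelihood over the scored tokens — exactly what `nll += -std::log(prob)` followed by `std::exp(nll / count)` computes:

$$
\mathrm{PPL} = \exp\!\left(-\frac{1}{N}\sum_{i=1}^{N}\log p\big(x_i \mid x_{<i}\big)\right),
$$

where $N$ is `count`, the number of next-token predictions scored so far.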
@@ -369,6 +487,12 @@ int main(int argc, char ** argv) {
 params.perplexity = true;
 params.n_batch = std::min(params.n_batch, params.n_ctx);
 
+if (params.ppl_stride > 0) {
+fprintf(stderr, "Will perform strided perplexity calculation -> adjusting context size from %d to %d\n",
+params.n_ctx, params.n_ctx + params.ppl_stride/2);
+params.n_ctx += params.ppl_stride/2;
+}
+
 if (params.n_ctx > 2048) {
 fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
 "expect poor results\n", __func__, params.n_ctx);
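A quick worked example of the adjustment above (the numbers are purely illustrative): with `params.n_ctx = 2048` and `params.ppl_stride = 512`, the strided path enlarges the evaluation window by half a stride, i.e. $2048 + \lfloor 512/2 \rfloor = 2304$ tokens.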
@@ -24,7 +24,7 @@
 #endif
 
 struct quantize_stats_params {
-std::string model = "models/7B/ggml-model-f16.bin";
+std::string model = "models/7B/ggml-model-f16.gguf";
 bool verbose = false;
 bool per_layer_stats = false;
 bool print_histogram = false;
@@ -14,25 +14,25 @@ struct quant_option {
 };
 
 static const std::vector<struct quant_option> QUANT_OPTIONS = {
-{ "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.50G, +0.2499 ppl @ 7B", },
+{ "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.56G, +0.2166 ppl @ LLaMA-v1-7B", },
-{ "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1846 ppl @ 7B", },
+{ "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", },
-{ "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.30G, +0.0796 ppl @ 7B", },
+{ "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", },
-{ "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0415 ppl @ 7B", },
+{ "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", },
 #ifdef GGML_USE_K_QUANTS
-{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.67G, +0.8698 ppl @ 7B", },
+{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
 { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
-{ "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5505 ppl @ 7B", },
+{ "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", },
-{ "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.06G, +0.2437 ppl @ 7B", },
+{ "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", },
-{ "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1803 ppl @ 7B", },
+{ "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", },
 { "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
-{ "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.56G, +0.1149 ppl @ 7B", },
+{ "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
-{ "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0535 ppl @ 7B", },
+{ "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", },
 { "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
-{ "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0353 ppl @ 7B", },
+{ "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", },
-{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0142 ppl @ 7B", },
+{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
-{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0044 ppl @ 7B", },
+{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, -0.0008 ppl @ LLaMA-v1-7B", },
 #endif
-{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ 7B", },
+{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
 { "F16", LLAMA_FTYPE_MOSTLY_F16, "13.00G @ 7B", },
 { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
 };
@@ -68,10 +68,10 @@ bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftype, std:
 }
 
 // usage:
-// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.bin [models/llama/ggml-model-quant.bin] type [nthreads]
+// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
 //
 void usage(const char * executable) {
-fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.bin [model-quant.bin] type [nthreads]\n\n", executable);
+fprintf(stderr, "usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
 fprintf(stderr, " --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
 fprintf(stderr, " --leave-output-tensor: Will leave output.weight un(re)quantized. Increases model size but may also increase quality, especially when requantizing\n");
 fprintf(stderr, "\nAllowed quantization types:\n");
@@ -118,8 +118,8 @@ int main(int argc, char ** argv) {
 if (pos != std::string::npos) {
 fpath = fname_inp.substr(0, pos + 1);
 }
-// export as [inp path]/ggml-model-[ftype].bin
+// export as [inp path]/ggml-model-[ftype].gguf
-fname_out = fpath + "ggml-model-" + ftype_str + ".bin";
+fname_out = fpath + "ggml-model-" + ftype_str + ".gguf";
 arg_idx++;
 }
 else {
@@ -1,4 +1,3 @@
-
 #!/bin/bash
 
 cd `dirname $0`
@@ -26,7 +26,6 @@ int main(int argc, char ** argv) {
 auto lparams = llama_context_default_params();
 
 lparams.n_ctx = params.n_ctx;
-lparams.n_gqa = params.n_gqa;
 lparams.seed = params.seed;
 lparams.f16_kv = params.memory_f16;
 lparams.use_mmap = params.use_mmap;
@@ -45,9 +44,8 @@ int main(int argc, char ** argv) {
 llama_free_model(model);
 return 1;
 }
-auto tokens = std::vector<llama_token>(params.n_ctx);
+auto tokens = llama_tokenize(ctx, params.prompt.c_str(), true);
-auto n_prompt_tokens = llama_tokenize(ctx, params.prompt.c_str(), tokens.data(), int(tokens.size()), true);
+auto n_prompt_tokens = tokens.size();
-
 if (n_prompt_tokens < 1) {
 fprintf(stderr, "%s : failed to tokenize prompt\n", __func__);
 llama_free(ctx);
@@ -92,7 +90,7 @@ int main(int argc, char ** argv) {
 auto next_token_str = llama_token_to_str(ctx, next_token);
 last_n_tokens_data.push_back(next_token);
 
-printf("%s", next_token_str);
+printf("%s", next_token_str.c_str());
 if (llama_eval(ctx, &next_token, 1, n_past, params.n_threads)) {
 fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
 llama_free(ctx);
@@ -152,7 +150,7 @@ int main(int argc, char ** argv) {
 auto next_token_str = llama_token_to_str(ctx2, next_token);
 last_n_tokens_data.push_back(next_token);
 
-printf("%s", next_token_str);
+printf("%s", next_token_str.c_str());
 if (llama_eval(ctx2, &next_token, 1, n_past, params.n_threads)) {
 fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
 llama_free(ctx2);
0
examples/server-llama2-13B.sh
Normal file → Executable file
@@ -5,7 +5,7 @@ This example demonstrates a simple HTTP API server and a simple web front end to
 Command line options:
 
 - `--threads N`, `-t N`: Set the number of threads to use during computation.
-- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
+- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
 - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
@@ -48,15 +48,14 @@ To get started right away, run the following command, making sure to use the cor
 ### Unix-based systems (Linux, macOS, etc.):
 
 ```bash
-./server -m models/7B/ggml-model.bin -c 2048
+./server -m models/7B/ggml-model.gguf -c 2048
 ```
 
 ### Windows:
 
 ```powershell
-server.exe -m models\7B\ggml-model.bin -c 2048
+server.exe -m models\7B\ggml-model.gguf -c 2048
 ```
 
 The above command will start a server that by default listens on `127.0.0.1:8080`.
 You can consume the endpoints with Postman or NodeJS with axios library. You can visit the web front end at the same url.
-
@@ -127,7 +126,7 @@ node .
 
 `stream`: It allows receiving each predicted token in real-time instead of waiting for the completion to finish. To enable this, set to `true`.
 
-`prompt`: Provide a prompt. Internally, the prompt is compared, and it detects if a part has already been evaluated, and the remaining part will be evaluate. A space is inserted in the front like main.cpp does.
+`prompt`: Provide a prompt as a string, or as an array of strings and numbers representing tokens. Internally, the prompt is compared, and it detects if a part has already been evaluated, and the remaining part will be evaluate. If the prompt is a string, or an array with the first element given as a string, a space is inserted in the front like main.cpp does.
 
 `stop`: Specify a JSON array of stopping strings.
 These words will not be included in the completion, so make sure to add them to the prompt for the next iteration (default: []).
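To illustrate the new `prompt` format described above, here is a minimal sketch of how a client might assemble such a request body with nlohmann::json (the JSON library the server example itself uses); the prompt text, the raw token id `4103`, and the `n_predict` value are invented for the example:

```cpp
#include <nlohmann/json.hpp>
#include <iostream>

using json = nlohmann::json;

int main() {
    // A prompt given as an array may mix plain strings with raw token ids.
    json body = {
        {"prompt",    json::array({"Building a website can be done in", 4103, " simple steps:"})},
        {"stop",      json::array({"\n"})},   // stopping strings, as described in the README above
        {"n_predict", 64}                      // illustrative value
    };
    std::cout << body.dump(2) << std::endl;   // POST this to the server's completion endpoint
}
```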
@@ -1,3 +1,4 @@
+#!/usr/bin/env python3
 import argparse
 from flask import Flask, jsonify, request, Response
 import urllib.parse
0
examples/server/chat-llama2.sh
Normal file → Executable file
0
examples/server/chat.sh
Normal file → Executable file
@@ -190,6 +190,7 @@ struct llama_server_context
 size_t n_past = 0;
 size_t n_remain = 0;
 
+json prompt;
 std::vector<llama_token> embd;
 std::vector<llama_token> last_n_tokens;
 
@@ -267,6 +268,53 @@ struct llama_server_context
 return true;
 }
 
+std::vector<llama_token> tokenize(json json_prompt, bool add_bos)
+{
+// If `add_bos` is true, we only add BOS, when json_prompt is a string,
+// or the first element of the json_prompt array is a string.
+std::vector<llama_token> prompt_tokens;
+
+if (json_prompt.is_array())
+{
+bool first = true;
+for (const auto& p : json_prompt)
+{
+if (p.is_string())
+{
+auto s = p.template get<std::string>();
+std::vector<llama_token> p;
+if (first)
+{
+s.insert(0, 1, ' '); // add a space if it's the first
+p = ::llama_tokenize(ctx, s, add_bos);
+first = false;
+}
+else
+{
+p = ::llama_tokenize(ctx, s, false);
+}
+prompt_tokens.insert(prompt_tokens.end(), p.begin(), p.end());
+}
+else
+{
+if (first)
+{
+first = false;
+}
+prompt_tokens.push_back(p.template get<llama_token>());
+}
+}
+}
+else
+{
+auto s = json_prompt.template get<std::string>();
+s.insert(0, 1, ' '); // always add a first space
+prompt_tokens = ::llama_tokenize(ctx, s, add_bos);
+}
+
+return prompt_tokens;
+}
+
 bool loadGrammar()
 {
 if (!params.grammar.empty()) {
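A standalone sketch of the flattening rule implemented by `tokenize()` above: strings are run through the tokenizer (BOS and the leading space only for the first string), while bare numbers are passed through as raw token ids. The stub tokenizer and the token id `4103` are invented stand-ins, not llama.cpp API:

```cpp
#include <nlohmann/json.hpp>
#include <cstdio>
#include <string>
#include <vector>

using json = nlohmann::json;
using token_id = int;                                   // stand-in for llama_token

// stub: one fake token per character; id 1 plays the role of BOS
static std::vector<token_id> stub_tokenize(const std::string & s, bool add_bos) {
    std::vector<token_id> out;
    if (add_bos) out.push_back(1);
    for (unsigned char c : s) out.push_back(c);
    return out;
}

static std::vector<token_id> flatten(const json & prompt, bool add_bos) {
    std::vector<token_id> out;
    if (!prompt.is_array()) {
        return stub_tokenize(" " + prompt.get<std::string>(), add_bos);
    }
    bool first = true;
    for (const auto & p : prompt) {
        if (p.is_string()) {
            std::string s = p.get<std::string>();
            if (first) s.insert(0, 1, ' ');             // space only before the first string
            auto t = stub_tokenize(s, first && add_bos); // BOS only if the first element is a string
            out.insert(out.end(), t.begin(), t.end());
        } else {
            out.push_back(p.get<token_id>());            // raw token id, used as-is
        }
        first = false;
    }
    return out;
}

int main() {
    auto toks = flatten(json::array({"Hello", 4103, " world"}), /*add_bos=*/true);
    printf("flattened into %zu tokens\n", toks.size());
}
```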
@@ -279,7 +327,7 @@ struct llama_server_context
 grammar_parser::print_grammar(stderr, parsed_grammar);
 
 {
-auto it = params.logit_bias.find(llama_token_eos());
+auto it = params.logit_bias.find(llama_token_eos(ctx));
 if (it != params.logit_bias.end() && it->second == -INFINITY) {
 LOG_WARNING("EOS token is disabled, which will cause most grammars to fail", {});
 }
@@ -294,8 +342,8 @@ struct llama_server_context
 
 void loadPrompt()
 {
-params.prompt.insert(0, 1, ' '); // always add a first space
-std::vector<llama_token> prompt_tokens = ::llama_tokenize(ctx, params.prompt, true);
+auto prompt_tokens = tokenize(prompt, true); // always add BOS
 num_prompt_tokens = prompt_tokens.size();
 
 if (params.n_keep < 0)
@@ -402,7 +450,7 @@ struct llama_server_context
 if (params.n_predict == 0)
 {
 has_next_token = false;
-result.tok = llama_token_eos();
+result.tok = llama_token_eos(ctx);
 return result;
 }
 
@@ -442,7 +490,7 @@ struct llama_server_context
 llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};
 
 // Apply penalties
-float nl_logit = logits[llama_token_nl()];
+float nl_logit = logits[llama_token_nl(ctx)];
 auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), params.n_ctx);
 llama_sample_repetition_penalty(ctx, &candidates_p,
 last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
@@ -452,7 +500,7 @@ struct llama_server_context
 last_n_repeat, alpha_frequency, alpha_presence);
 if (!penalize_nl)
 {
-logits[llama_token_nl()] = nl_logit;
+logits[llama_token_nl(ctx)] = nl_logit;
 }
 
 if (grammar != nullptr) {
@@ -515,7 +563,7 @@ struct llama_server_context
 // decrement remaining sampling budget
 --n_remain;
 
-if (!embd.empty() && embd.back() == llama_token_eos())
+if (!embd.empty() && embd.back() == llama_token_eos(ctx))
 {
 // stopping_word = llama_token_to_str(ctx, embd.back());
 has_next_token = false;
@@ -652,8 +700,6 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
 fprintf(stdout, " -v, --verbose verbose output (default: %s)\n", server_verbose ? "enabled" : "disabled");
 fprintf(stdout, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads);
 fprintf(stdout, " -c N, --ctx-size N size of the prompt context (default: %d)\n", params.n_ctx);
-fprintf(stdout, " -gqa N, --gqa N grouped-query attention factor (TEMP!!! use 8 for LLaMAv2 70B) (default: %d)\n", params.n_gqa);
-fprintf(stdout, " -eps N, --rms-norm-eps N rms norm eps (TEMP!!! use 1e-5 for LLaMAv2) (default: %.1e)\n", params.rms_norm_eps);
 fprintf(stdout, " --rope-freq-base N RoPE base frequency (default: %.1f)\n", params.rope_freq_base);
 fprintf(stdout, " --rope-freq-scale N RoPE frequency scaling factor (default: %g)\n", params.rope_freq_scale);
 fprintf(stdout, " -b N, --batch-size N batch size for prompt processing (default: %d)\n", params.n_batch);
@@ -673,12 +719,11 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
 fprintf(stdout, " number of layers to store in VRAM\n");
 fprintf(stdout, " -ts SPLIT --tensor-split SPLIT\n");
 fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-fprintf(stdout, " how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
 fprintf(stdout, " -mg i, --main-gpu i the GPU to use for scratch and small tensors\n");
 fprintf(stdout, " -lv, --low-vram don't allocate VRAM scratch buffer\n");
-fprintf(stdout, " -mmq, --mul-mat-q use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
+fprintf(stdout, " -nommq, --no-mul-mat-q\n");
-fprintf(stdout, " Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
+fprintf(stdout, " use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
-fprintf(stdout, " is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
+fprintf(stdout, " Not recommended since this is both slower and uses more VRAM.\n");
 #endif
 fprintf(stdout, " -m FNAME, --model FNAME\n");
 fprintf(stdout, " model path (default: %s)\n", params.model.c_str());
@@ -774,23 +819,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
 }
 params.n_ctx = std::stoi(argv[i]);
 }
-else if (arg == "-gqa" || arg == "--gqa")
-{
-if (++i >= argc)
-{
-invalid_param = true;
-break;
-}
-params.n_gqa = std::stoi(argv[i]);
-}
-else if (arg == "-eps" || arg == "--rms-norm-eps") {
-if (++i >= argc)
-{
-invalid_param = true;
-break;
-}
-params.rms_norm_eps = std::stof(argv[i]);
-}
 else if (arg == "--rope-freq-base")
 {
 if (++i >= argc)
@@ -886,12 +914,12 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
 LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n", {});
 #endif // GGML_USE_CUBLAS
 }
-else if (arg == "--mul-mat-q" || arg == "-mmq")
+else if (arg == "--no-mul-mat-q" || arg == "-nommq")
 {
 #ifdef GGML_USE_CUBLAS
-params.mul_mat_q = true;
+params.mul_mat_q = false;
 #else
-LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n", {});
+LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
 #endif // GGML_USE_CUBLAS
 }
 else if (arg == "--main-gpu" || arg == "-mg")
@@ -968,7 +996,7 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
 
 static json format_generation_settings(llama_server_context &llama)
 {
-const auto eos_bias = llama.params.logit_bias.find(llama_token_eos());
+const auto eos_bias = llama.params.logit_bias.find(llama_token_eos(llama.ctx));
 const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
 eos_bias->second < 0.0f && std::isinf(eos_bias->second);
 
@@ -1036,7 +1064,7 @@ static json format_final_response(llama_server_context &llama, const std::string
 {"tokens_predicted", llama.num_tokens_predicted},
 {"tokens_evaluated", llama.num_prompt_tokens},
 {"generation_settings", format_generation_settings(llama)},
-{"prompt", llama.params.prompt},
+{"prompt", llama.prompt},
 {"truncated", llama.truncated},
 {"stopped_eos", llama.stopped_eos},
 {"stopped_word", llama.stopped_word},
@@ -1075,35 +1103,52 @@ static json format_tokenizer_response(const std::vector<llama_token> &tokens)
 {"tokens", tokens}};
 }
 
+template <typename T>
+static T json_value(const json &body, const std::string &key, const T &default_value)
+{
+// Fallback null to default value
+return body.contains(key) && !body.at(key).is_null()
+? body.value(key, default_value)
+: default_value;
+}
+
 static void parse_options_completion(const json &body, llama_server_context &llama)
 {
 gpt_params default_params;
 
-llama.stream = body.value("stream", false);
-llama.params.n_predict = body.value("n_predict", default_params.n_predict);
-llama.params.top_k = body.value("top_k", default_params.top_k);
-llama.params.top_p = body.value("top_p", default_params.top_p);
-llama.params.tfs_z = body.value("tfs_z", default_params.tfs_z);
-llama.params.typical_p = body.value("typical_p", default_params.typical_p);
-llama.params.repeat_last_n = body.value("repeat_last_n", default_params.repeat_last_n);
-llama.params.temp = body.value("temperature", default_params.temp);
-llama.params.repeat_penalty = body.value("repeat_penalty", default_params.repeat_penalty);
-llama.params.presence_penalty = body.value("presence_penalty", default_params.presence_penalty);
-llama.params.frequency_penalty = body.value("frequency_penalty", default_params.frequency_penalty);
-llama.params.mirostat = body.value("mirostat", default_params.mirostat);
-llama.params.mirostat_tau = body.value("mirostat_tau", default_params.mirostat_tau);
-llama.params.mirostat_eta = body.value("mirostat_eta", default_params.mirostat_eta);
-llama.params.penalize_nl = body.value("penalize_nl", default_params.penalize_nl);
-llama.params.n_keep = body.value("n_keep", default_params.n_keep);
-llama.params.seed = body.value("seed", default_params.seed);
-llama.params.prompt = body.value("prompt", default_params.prompt);
-llama.params.grammar = body.value("grammar", default_params.grammar);
-llama.params.n_probs = body.value("n_probs", default_params.n_probs);
+llama.stream = json_value(body, "stream", false);
+llama.params.n_predict = json_value(body, "n_predict", default_params.n_predict);
+llama.params.top_k = json_value(body, "top_k", default_params.top_k);
+llama.params.top_p = json_value(body, "top_p", default_params.top_p);
+llama.params.tfs_z = json_value(body, "tfs_z", default_params.tfs_z);
+llama.params.typical_p = json_value(body, "typical_p", default_params.typical_p);
+llama.params.repeat_last_n = json_value(body, "repeat_last_n", default_params.repeat_last_n);
+llama.params.temp = json_value(body, "temperature", default_params.temp);
+llama.params.repeat_penalty = json_value(body, "repeat_penalty", default_params.repeat_penalty);
+llama.params.presence_penalty = json_value(body, "presence_penalty", default_params.presence_penalty);
+llama.params.frequency_penalty = json_value(body, "frequency_penalty", default_params.frequency_penalty);
+llama.params.mirostat = json_value(body, "mirostat", default_params.mirostat);
+llama.params.mirostat_tau = json_value(body, "mirostat_tau", default_params.mirostat_tau);
+llama.params.mirostat_eta = json_value(body, "mirostat_eta", default_params.mirostat_eta);
+llama.params.penalize_nl = json_value(body, "penalize_nl", default_params.penalize_nl);
+llama.params.n_keep = json_value(body, "n_keep", default_params.n_keep);
+llama.params.seed = json_value(body, "seed", default_params.seed);
+llama.params.grammar = json_value(body, "grammar", default_params.grammar);
+llama.params.n_probs = json_value(body, "n_probs", default_params.n_probs);
+
+if (body.count("prompt") != 0)
+{
+llama.prompt = body["prompt"];
+}
+else
+{
+llama.prompt = "";
+}
 
 llama.params.logit_bias.clear();
-if (body.value("ignore_eos", false))
+if (json_value(body, "ignore_eos", false))
 {
-llama.params.logit_bias[llama_token_eos()] = -INFINITY;
+llama.params.logit_bias[llama_token_eos(llama.ctx)] = -INFINITY;
 }
 
 const auto &logit_bias = body.find("logit_bias");
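The new `json_value` helper centralizes the "missing key or explicit null falls back to the default" rule used throughout `parse_options_completion`. A minimal standalone illustration (the helper mirrors the one added above; the request body and defaults are invented):

```cpp
#include <nlohmann/json.hpp>
#include <cstdio>
#include <string>

using json = nlohmann::json;

template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
{
    // same fallback rule as the patch: missing key or explicit null -> default
    return body.contains(key) && !body.at(key).is_null()
        ? body.value(key, default_value)
        : default_value;
}

int main() {
    json body = json::parse(R"({"top_k": null, "temperature": 0.7})"); // invented request
    printf("top_k       = %d\n",   json_value(body, "top_k", 40));        // null   -> default 40
    printf("temperature = %.2f\n", json_value(body, "temperature", 0.8)); // given  -> 0.70
    printf("n_predict   = %d\n",   json_value(body, "n_predict", -1));    // absent -> default -1
}
```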
@@ -1356,8 +1401,11 @@ int main(int argc, char **argv)
 auto lock = llama.lock();
 
 const json body = json::parse(req.body);
-const std::string content = body.value("content", "");
-const std::vector<llama_token> tokens = llama_tokenize(llama.ctx, content, false);
+std::vector<llama_token> tokens;
+if (body.count("content") != 0)
+{
+tokens = llama.tokenize(body["content"], false);
+}
 const json data = format_tokenizer_response(tokens);
 return res.set_content(data.dump(), "application/json"); });
 
@@ -1369,7 +1417,14 @@ int main(int argc, char **argv)
 
 llama.rewind();
 llama_reset_timings(llama.ctx);
-llama.params.prompt = body.value("content", "");
+if (body.count("content") != 0)
+{
+llama.prompt = body["content"];
+}
+else
+{
+llama.prompt = "";
+}
 llama.params.n_predict = 0;
 llama.loadPrompt();
 llama.beginCompletion();
@@ -1398,7 +1453,7 @@ int main(int argc, char **argv)
 {
 if (res.status == 400) {
 res.set_content("Invalid request", "text/plain");
-} else {
+} else if (res.status != 500) {
 res.set_content("File Not Found", "text/plain");
 res.status = 404;
 } });
@@ -2,180 +2,129 @@
 #define _GNU_SOURCE
 #endif
 
-#include "common.h"
-#include "llama.h"
 #include "build-info.h"
 
-#include <cassert>
+#include "common.h"
-#include <cinttypes>
+#include "llama.h"
 
 #include <cmath>
 #include <cstdio>
-#include <cstring>
-#include <ctime>
-#include <fstream>
-#include <iostream>
 #include <string>
 #include <vector>
 
-#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
+int main(int argc, char ** argv) {
-#include <signal.h>
-#include <unistd.h>
-#elif defined (_WIN32)
-#define WIN32_LEAN_AND_MEAN
-#define NOMINMAX
-#include <windows.h>
-#include <signal.h>
-#endif
-
-
-int main(int argc, char ** argv)
-{
 gpt_params params;
 
-//---------------------------------
+if (argc == 1 || argv[1][0] == '-') {
-// Print help :
+printf("usage: %s MODEL_PATH [PROMPT]\n" , argv[0]);
-//---------------------------------
-
-if ( argc == 1 || argv[1][0] == '-' )
-{
-printf( "usage: %s MODEL_PATH [PROMPT]\n" , argv[0] );
 return 1 ;
 }
 
-//---------------------------------
+if (argc >= 2) {
-// Load parameters :
-//---------------------------------
-
-if ( argc >= 2 )
-{
 params.model = argv[1];
 }
 
-if ( argc >= 3 )
+if (argc >= 3) {
-{
 params.prompt = argv[2];
 }
 
-if ( params.prompt.empty() )
+if (params.prompt.empty()) {
-{
 params.prompt = "Hello my name is";
 }
 
-//---------------------------------
+// init LLM
-// Init LLM :
-//---------------------------------
-
 llama_backend_init(params.numa);
 
-llama_model * model;
+llama_context_params ctx_params = llama_context_default_params();
-llama_context * ctx;
 
-std::tie(model, ctx) = llama_init_from_gpt_params( params );
+llama_model * model = llama_load_model_from_file(params.model.c_str(), ctx_params);
 
-if ( model == NULL )
+if (model == NULL) {
-{
+fprintf(stderr , "%s: error: unable to load model\n" , __func__);
-fprintf( stderr , "%s: error: unable to load model\n" , __func__ );
 return 1;
 }
 
-//---------------------------------
+llama_context * ctx = llama_new_context_with_model(model, ctx_params);
-// Tokenize the prompt :
-//---------------------------------
+// tokenize the prompt
 
 std::vector<llama_token> tokens_list;
-tokens_list = ::llama_tokenize( ctx , params.prompt , true );
+tokens_list = ::llama_tokenize(ctx, params.prompt, true);
 
-const int max_context_size = llama_n_ctx( ctx );
+const int max_context_size = llama_n_ctx(ctx);
-const int max_tokens_list_size = max_context_size - 4 ;
+const int max_tokens_list_size = max_context_size - 4;
 
-if ( (int)tokens_list.size() > max_tokens_list_size )
+if ((int) tokens_list.size() > max_tokens_list_size) {
-{
+fprintf(stderr, "%s: error: prompt too long (%d tokens, max %d)\n", __func__, (int) tokens_list.size(), max_tokens_list_size);
-fprintf( stderr , "%s: error: prompt too long (%d tokens, max %d)\n" ,
-__func__ , (int)tokens_list.size() , max_tokens_list_size );
 return 1;
 }
 
-fprintf( stderr, "\n\n" );
+fprintf(stderr, "\n\n");
 
-// Print the tokens from the prompt :
+for (auto id : tokens_list) {
+fprintf(stderr, "%s", llama_token_to_str(ctx, id).c_str());
-for( auto id : tokens_list )
-{
-printf( "%s" , llama_token_to_str( ctx , id ) );
 }
 
-fflush(stdout);
+fflush(stderr);
 
+// main loop
-//---------------------------------
-// Main prediction loop :
-//---------------------------------
 
 // The LLM keeps a contextual cache memory of previous token evaluation.
 // Usually, once this cache is full, it is required to recompute a compressed context based on previous
 // tokens (see "infinite text generation via context swapping" in the main example), but in this minimalist
 // example, we will just stop the loop once this cache is full or once an end of stream is detected.
 
-while ( llama_get_kv_cache_token_count( ctx ) < max_context_size )
+const int n_gen = std::min(32, max_context_size);
-{
-//---------------------------------
-// Evaluate the tokens :
-//---------------------------------
 
-if ( llama_eval( ctx , tokens_list.data() , int(tokens_list.size()) , llama_get_kv_cache_token_count( ctx ) , params.n_threads ) )
+while (llama_get_kv_cache_token_count(ctx) < n_gen) {
-{
+// evaluate the transformer
-fprintf( stderr, "%s : failed to eval\n" , __func__ );
+
+if (llama_eval(ctx, tokens_list.data(), int(tokens_list.size()), llama_get_kv_cache_token_count(ctx), params.n_threads)) {
+fprintf(stderr, "%s : failed to eval\n", __func__);
 return 1;
 }
 
 tokens_list.clear();
 
-//---------------------------------
+// sample the next token
-// Select the best prediction :
-//---------------------------------
 
 llama_token new_token_id = 0;
 
-auto logits = llama_get_logits( ctx );
+auto logits = llama_get_logits(ctx);
-auto n_vocab = llama_n_vocab( ctx ); // the size of the LLM vocabulary (in tokens)
+auto n_vocab = llama_n_vocab(ctx);
 
 std::vector<llama_token_data> candidates;
-candidates.reserve( n_vocab );
+candidates.reserve(n_vocab);
 
-for( llama_token token_id = 0 ; token_id < n_vocab ; token_id++ )
+for (llama_token token_id = 0; token_id < n_vocab; token_id++) {
-{
+candidates.emplace_back(llama_token_data{ token_id, logits[token_id], 0.0f });
-candidates.emplace_back( llama_token_data{ token_id , logits[ token_id ] , 0.0f } );
 }
 
 llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
 
-// Select it using the "Greedy sampling" method :
+new_token_id = llama_sample_token_greedy(ctx , &candidates_p);
-new_token_id = llama_sample_token_greedy( ctx , &candidates_p );
 
 
 // is it an end of stream ?
-if ( new_token_id == llama_token_eos() )
+if (new_token_id == llama_token_eos(ctx)) {
-{
 fprintf(stderr, " [end of text]\n");
 break;
 }
 
-// Print the new token :
+// print the new token :
-printf( "%s" , llama_token_to_str( ctx , new_token_id ) );
+printf("%s", llama_token_to_str(ctx, new_token_id).c_str());
-fflush( stdout );
+fflush(stdout);
 
-// Push this new token for next evaluation :
+// push this new token for next evaluation
-tokens_list.push_back( new_token_id );
+tokens_list.push_back(new_token_id);
+}
 
-} // wend of main loop
+llama_free(ctx);
+llama_free_model(model);
-llama_free( ctx );
-llama_free_model( model );
 
 llama_backend_free();
 
+fprintf(stderr, "\n\n");
+
 return 0;
 }
 
-// EOF
@@ -1,4 +1,5 @@
 #include "ggml.h"
+#include "common.h"
 #include "llama.h"
 #include <unordered_map>
 #include <vector>
@@ -16,7 +17,7 @@
 #pragma warning(disable: 4244 4267) // possible loss of data
 #endif
 
-static const float rms_norm_eps = LLAMA_DEFAULT_RMS_EPS;
+static const float rms_norm_eps = 1e-5f;
 
 struct random_normal_distribution {
     std::mt19937 gen;
@@ -169,14 +170,16 @@ struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struc
 struct llama_vocab {
     using id    = int32_t;
     using token = std::string;
+    using ttype = llama_token_type;
 
-    struct token_score {
-        token tok;
+    struct token_data {
+        token text;
         float score;
+        ttype type;
     };
 
     std::unordered_map<token, id> token_to_id;
-    std::vector<token_score> id_to_token;
+    std::vector<token_data> id_to_token;
 };
 
 struct my_llama_hparams {
@@ -1865,10 +1868,10 @@ struct ggml_tensor * forward_batch_wo_cache_flash_attn_train(
     t12->grad = expand(gb, ggml_permute(ctx0, t15->grad, 0, 2, 3, 1)); assert_shape_4d(t12->grad, N, n_batch, n_embd/n_head, n_head);
     t11->grad = expand(gb, ggml_reshape_2d(ctx0, ggml_cont(ctx0, t12->grad), N*n_batch, n_embd)); assert_shape_2d(t11->grad, N*n_batch, n_embd);
     t10->grad = expand(gb, ggml_permute(ctx0, t14->grad, 0, 2, 1, 3)); assert_shape_4d(t10->grad, n_embd/n_head, n_head, N, n_batch);
-    t09->grad = expand(gb, ggml_rope_back(ctx0, t10->grad, n_past, n_rot, rope_mode, n_ctx)); assert_shape_4d(t09->grad, n_embd/n_head, n_head, N, n_batch);
+    t09->grad = expand(gb, ggml_rope_back(ctx0, t10->grad, n_past, n_rot, rope_mode, n_ctx, 10000.0f, 1.0f, 0.0f, false)); assert_shape_4d(t09->grad, n_embd/n_head, n_head, N, n_batch);
     t08->grad = expand(gb, ggml_reshape_2d(ctx0, t09->grad, n_embd, N*n_batch)); assert_shape_2d(t08->grad, n_embd, N*n_batch);
     t07->grad = expand(gb, ggml_permute(ctx0, t13->grad, 0, 2, 1, 3)); assert_shape_4d(t07->grad, n_embd/n_head, n_head, N, n_batch);
-    t06->grad = expand(gb, ggml_rope_back(ctx0, t07->grad, n_past, n_rot, rope_mode, n_ctx)); assert_shape_4d(t06->grad, n_embd/n_head, n_head, N, n_batch);
+    t06->grad = expand(gb, ggml_rope_back(ctx0, t07->grad, n_past, n_rot, rope_mode, n_ctx, 10000.0f, 1.0f, 0.0f, false)); assert_shape_4d(t06->grad, n_embd/n_head, n_head, N, n_batch);
     t05->grad = expand(gb, ggml_reshape_2d(ctx0, t06->grad, n_embd, N*n_batch)); assert_shape_2d(t05->grad, n_embd, N*n_batch);
     t04->grad = expand(gb, ggml_add_inplace(ctx0,
                 ggml_add_inplace(ctx0,
@@ -1961,7 +1964,7 @@ void print_matrix(struct ggml_tensor * probs) {
 
 
 void print_token(struct llama_context * ctx, llama_token token) {
-    printf("%s", llama_token_to_str(ctx, token));
+    printf("%s", llama_token_to_str(ctx, token).c_str());
 }
 
 void print_tokens(struct llama_context* ctx, struct ggml_tensor * tokens) {
@@ -1995,7 +1998,7 @@ void print_tokens_batch(struct llama_context* ctx, struct ggml_tensor * tokens)
     }
 }
 
-void get_example_targets(const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) {
+void get_example_targets(struct llama_context * lctx, const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) {
     int n_tokens = tokens_input->ne[0];
     int n_vocab  = target_logits->ne[0];
 
@@ -2004,7 +2007,7 @@ void get_example_targets(const int * train_samples, size_t n_train_samples, cons
 
     ggml_set_f32(target_logits, -1.0f/n_vocab);
     ggml_set_f32(target_probs, 0.0f);
-    ggml_set_i32_1d(tokens_input, 0, llama_token_bos());
+    ggml_set_i32_1d(tokens_input, 0, llama_token_bos(lctx));
     for (int i=1; i<n_tokens+1; ++i) {
         int token = clamp(train_data[sample+i-1], 0, n_vocab-1);
         set_f32_2d(target_logits, token, i-1, +1.0f);
@@ -2015,7 +2018,7 @@ void get_example_targets(const int * train_samples, size_t n_train_samples, cons
     }
 }
 
-void get_example_targets_batch(struct llama_context * /*lctx*/, const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) {
+void get_example_targets_batch(struct llama_context * lctx, const int * train_samples, size_t n_train_samples, const llama_token * train_data, size_t n_train_data, int example_id, struct ggml_tensor * tokens_input, struct ggml_tensor * target_logits, struct ggml_tensor * target_probs) {
     GGML_ASSERT(tokens_input->n_dims  == 2);
     GGML_ASSERT(target_logits->n_dims == 3);
     GGML_ASSERT(target_probs->n_dims  == 3);
@@ -2035,7 +2038,7 @@ void get_example_targets_batch(struct llama_context* /*lctx*/, const int * trai
         size_t sample = train_samples[(example_id*n_batch + k) % n_train_samples];
         GGML_ASSERT(sample+n_tokens-1 < n_train_data);
 
-        set_i32_2d(tokens_input, 0, k, llama_token_bos());
+        set_i32_2d(tokens_input, 0, k, llama_token_bos(lctx));
         for (int i=1; i<n_tokens+1; ++i) {
             int token = clamp(train_data[sample+i-1], 0, n_vocab-1);
             // print_token(lctx, token);
@@ -2188,11 +2191,10 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vecto
     f.read_raw(buf.data(), f.size);
     buf[f.size] = '\0';
 
-    out.resize(buf.size());
-    int n_tokens = llama_tokenize(lctx, buf.data(), out.data(), buf.size(), false);
-    if (n_tokens >= 0) {
-        out.resize(n_tokens);
+    int n_tokens = llama_tokenize(lctx, buf.data(), out.data(), out.size(), false);
+    if (n_tokens < 0) {
+        out.resize(-n_tokens);
+        llama_tokenize(lctx, buf.data(), out.data(), out.size(), false);
     }
 
     bool verify = false;
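Aside (ours): the convention used above is that llama_tokenize() returns the negated number of required tokens when the destination buffer is too small. A hedged sketch of the resulting two-pass pattern; the helper name tokenize_all is illustrative:

    #include "llama.h"
    #include <algorithm>
    #include <string>
    #include <vector>

    static std::vector<llama_token> tokenize_all(llama_context * lctx, const std::string & text, bool add_bos) {
        std::vector<llama_token> out(text.size() + 1);                          // initial guess for the buffer size
        int n = llama_tokenize(lctx, text.c_str(), out.data(), (int) out.size(), add_bos);
        if (n < 0) {
            out.resize(-n);                                                     // grow to the size the call asked for
            n = llama_tokenize(lctx, text.c_str(), out.data(), (int) out.size(), add_bos);
        }
        out.resize(std::max(n, 0));                                             // shrink to the tokens actually written
        return out;
    }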
@@ -2200,17 +2202,17 @@ int tokenize_file(struct llama_context * lctx, const char * filename, std::vecto
         const char * in  = buf.data();
         const char * end = buf.data() + buf.size();
         for (int i = 0; i < (int) out.size(); ++i) {
-            const char * s = llama_token_to_str(lctx, out[i]);
-            int len = strlen(s);
+            std::string s = llama_token_to_str(lctx, out[i]);
+            int len = s.length();
             if (in >= end) {
                 printf("%s: unexpected end of original text.\n", __func__);
                 break;
             }
-            const bool matches = (strncmp(in, s, len) == 0);
+            const bool matches = (strncmp(in, s.c_str(), len) == 0);
             if (matches) {
                 in += len;
             } else {
-                printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s);
+                printf("%s: mismatch: expected '%s', but got '%s'\n", __func__, std::string(in, len).c_str(), s.c_str());
             }
         }
     }
@@ -2294,7 +2296,7 @@ llama_token sample(struct my_llama_sampler * sampler, float * logits, const llam
     const auto params = sampler->params;
 
     // Apply penalties
-    const float nl_logit = logits[llama_token_nl()];
+    const float nl_logit = logits[llama_token_nl(ctx)];
 
     const int n_last = std::min(std::min(n_last_tokens, params.repeat_last_n), sampler->n_ctx);
 
@@ -2313,7 +2315,7 @@ llama_token sample(struct my_llama_sampler * sampler, float * logits, const llam
         params.alpha_presence);
 
     if (!params.penalize_nl) {
-        logits[llama_token_nl()] = nl_logit;
+        logits[llama_token_nl(ctx)] = nl_logit;
     }
 
     llama_token token = 0;
@@ -2612,42 +2614,45 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
         return;
     }
 
-    // write_magic
-    file.write_u32(LLAMA_FILE_MAGIC);   // magic
-    file.write_u32(LLAMA_FILE_VERSION); // version
-    // write_hparams
-    file.write_u32(model->hparams.n_vocab);
-    file.write_u32(model->hparams.n_embd);
-    file.write_u32(model->hparams.n_mult);
-    file.write_u32(model->hparams.n_head);
-    file.write_u32(model->hparams.n_layer);
-    file.write_u32(model->hparams.n_rot);
-    file.write_u32(LLAMA_FTYPE_ALL_F32);
-    // write_vocab
-    uint32_t n_vocab = model->hparams.n_vocab;
-    for (uint32_t i = 0; i < n_vocab; i++) {
-        const auto & token_score = vocab->id_to_token.at(i);
-        file.write_u32((uint32_t) token_score.tok.size());
-        file.write_raw(token_score.tok.data(), token_score.tok.size());
-        file.write_raw(&token_score.score, sizeof(token_score.score));
-    }
-    // write tensors
-    write_tensor(&file, model->tok_embeddings);
-    write_tensor(&file, model->norm);
-    write_tensor(&file, model->output);
-    for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
-        auto & layer = model->layers[i];
-
-        write_tensor(&file, layer.attention_norm);
-        write_tensor(&file, layer.wq);
-        write_tensor(&file, layer.wk);
-        write_tensor(&file, layer.wv);
-        write_tensor(&file, layer.wo);
-        write_tensor(&file, layer.ffn_norm);
-        write_tensor(&file, layer.w1);
-        write_tensor(&file, layer.w2);
-        write_tensor(&file, layer.w3);
-    }
+    #pragma message("TODO: implement file saving using gguf")
+    (void) vocab;
+    (void) model;
+    // // write_magic
+    // file.write_u32(LLAMA_FILE_MAGIC);   // magic
+    // file.write_u32(LLAMA_FILE_VERSION); // version
+    // // write_hparams
+    // file.write_u32(model->hparams.n_vocab);
+    // file.write_u32(model->hparams.n_embd);
+    // file.write_u32(model->hparams.n_mult);
+    // file.write_u32(model->hparams.n_head);
+    // file.write_u32(model->hparams.n_layer);
+    // file.write_u32(model->hparams.n_rot);
+    // file.write_u32(LLAMA_FTYPE_ALL_F32);
+    // // write_vocab
+    // uint32_t n_vocab = model->hparams.n_vocab;
+    // for (uint32_t i = 0; i < n_vocab; i++) {
+    //     const auto & token_data = vocab->id_to_token.at(i);
+    //     file.write_u32((uint32_t) token_data.tok.size());
+    //     file.write_raw(token_data.tok.data(), token_data.tok.size());
+    //     file.write_raw(&token_data.score, sizeof(token_data.score));
+    // }
+    // // write tensors
+    // write_tensor(&file, model->tok_embeddings);
+    // write_tensor(&file, model->norm);
+    // write_tensor(&file, model->output);
+    // for (uint32_t i = 0; i < model->hparams.n_layer; ++i) {
+    //     auto & layer = model->layers[i];
+    //
+    //     write_tensor(&file, layer.attention_norm);
+    //     write_tensor(&file, layer.wq);
+    //     write_tensor(&file, layer.wk);
+    //     write_tensor(&file, layer.wv);
+    //     write_tensor(&file, layer.wo);
+    //     write_tensor(&file, layer.ffn_norm);
+    //     write_tensor(&file, layer.w1);
+    //     write_tensor(&file, layer.w2);
+    //     write_tensor(&file, layer.w3);
+    // }
 }
 
 float cosine_decay(const int decay_steps, const float alpha, int step) {
@@ -3052,20 +3057,13 @@ int main(int argc, char ** argv) {
 
     struct llama_vocab vocab;
     {
-        std::vector<const char *> strings;
-        std::vector<float> scores;
-        int n_vocab = llama_n_vocab(lctx);
-        strings.resize(n_vocab, NULL);
-        scores.resize(n_vocab, 0);
-        n_vocab = llama_get_vocab(lctx, strings.data(), scores.data(), n_vocab);
-        GGML_ASSERT(n_vocab == llama_n_vocab(lctx));
+        const int n_vocab = llama_n_vocab(lctx);
         vocab.id_to_token.resize(n_vocab);
         for (int i=0; i<n_vocab; ++i) {
-            std::string tok   = std::string(strings[i]);
-            float       score = scores[i];
-            vocab.id_to_token[i].tok   = tok;
-            vocab.id_to_token[i].score = score;
-            vocab.token_to_id.emplace(tok, i);
+            vocab.id_to_token[i].text  = llama_token_get_text(lctx, i);
+            vocab.id_to_token[i].score = llama_token_get_score(lctx, i);
+            vocab.id_to_token[i].type  = llama_token_get_type(lctx, i);
+            vocab.token_to_id.emplace(vocab.id_to_token[i].text, i);
         }
     }
 
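Aside (ours): the vocabulary is now read back through per-token accessors instead of llama_get_vocab(). A small hedged sketch built on the same accessors; dump_vocab_head is an illustrative name:

    #include "llama.h"
    #include <cstdio>

    static void dump_vocab_head(llama_context * lctx, int n_max = 10) {
        const int n_vocab = llama_n_vocab(lctx);
        for (int i = 0; i < n_vocab && i < n_max; ++i) {
            printf("%5d '%s' score=%.3f type=%d\n",
                i,
                llama_token_get_text (lctx, i),
                llama_token_get_score(lctx, i),
                (int) llama_token_get_type(lctx, i));
        }
    }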
@@ -3178,7 +3176,7 @@ int main(int argc, char ** argv) {
     std::vector<int> train_samples;
     train_samples.push_back(0);
     for (int i = 1; i < (int) train_tokens.size() - n_tokens; ++i) {
-        if (!params.samples_start_after_nl || (train_tokens[i-1] == llama_token_nl())) {
+        if (!params.samples_start_after_nl || (train_tokens[i-1] == llama_token_nl(lctx))) {
             train_samples.push_back(i);
         }
     }
@@ -3338,7 +3336,7 @@ int main(int argc, char ** argv) {
             struct ggml_tensor * target_logits = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens);
             struct ggml_tensor * target_probs  = ggml_new_tensor_2d(model.ctx, GGML_TYPE_F32, n_vocab, n_tokens);
 
-            get_example_targets(train_samples.data(), train_samples.size(), train_tokens.data(), train_tokens.size(), rand()%train_samples.size(), tokens_input, target_logits, target_probs);
+            get_example_targets(lctx, train_samples.data(), train_samples.size(), train_tokens.data(), train_tokens.size(), rand()%train_samples.size(), tokens_input, target_logits, target_probs);
             for (int i=sample_ctx; i<n_tokens; ++i) {
                 ggml_set_i32_1d(tokens_input, i, n_vocab/2);
             }
@@ -76,7 +76,7 @@ struct ggml_allocr {
 };
 
 #ifdef GGML_ALLOCATOR_DEBUG
-static void add_allocated_tensor(struct ggml_allocator * alloc, struct ggml_tensor * tensor) {
+static void add_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
     for (int i = 0; i < 1024; i++) {
         if (alloc->allocated_tensors[i] == NULL) {
             alloc->allocated_tensors[i] = tensor;
@@ -85,7 +85,7 @@ static void add_allocated_tensor(struct ggml_allocator * alloc, struct ggml_tens
     }
     GGML_ASSERT(!"out of allocated_tensors");
 }
-static void remove_allocated_tensor(struct ggml_allocator * alloc, struct ggml_tensor * tensor) {
+static void remove_allocated_tensor(struct ggml_allocr * alloc, struct ggml_tensor * tensor) {
     for (int i = 0; i < 1024; i++) {
         if (alloc->allocated_tensors[i] == tensor ||
             (alloc->allocated_tensors[i] != NULL && alloc->allocated_tensors[i]->data == tensor->data)) {
ggml-cuda.cu (187 lines changed):

@@ -259,6 +259,7 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
 #define CUDA_CPY_BLOCK_SIZE 32
 #define CUDA_SCALE_BLOCK_SIZE 256
 #define CUDA_ROPE_BLOCK_SIZE 256
+#define CUDA_ALIBI_BLOCK_SIZE 32
 #define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
 #define CUDA_QUANTIZE_BLOCK_SIZE 256
 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256
@@ -286,7 +287,7 @@ static int g_device_count = -1;
 static int g_main_device = 0;
 static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
 static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
-static bool g_mul_mat_q = false;
+static bool g_mul_mat_q = true;
 
 static void * g_scratch_buffer = nullptr;
 static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
@@ -3886,13 +3887,13 @@ static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
 // rope == RoPE == rotary positional embedding
 static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p0,
                                 const float p_delta, const int p_delta_rows, const float theta_scale) {
-    const int col = 2*(blockDim.x*blockIdx.x + threadIdx.x);
+    const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
 
     if (col >= ncols) {
         return;
     }
 
-    const int row = blockDim.y*blockIdx.y + threadIdx.y;
+    const int row = blockDim.x*blockIdx.x + threadIdx.x;
     const int i = row*ncols + col;
 
     const float theta = (p0 + p_delta * (row/p_delta_rows))*powf(theta_scale, col/2);
@@ -3940,9 +3941,32 @@ static __global__ void rope_glm_f32(const float * x, float * dst, const int ncol
         dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
     }
 }
 
-static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) {
+static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
+                                 const int n_heads_log2_floor, const float m0, const float m1) {
     const int col = blockDim.x*blockIdx.x + threadIdx.x;
 
+    if (col >= ncols) {
+        return;
+    }
+
     const int row = blockDim.y*blockIdx.y + threadIdx.y;
+    const int i = row*ncols + col;
+
+    const int k = row/k_rows;
+
+    float m_k;
+    if (k < n_heads_log2_floor) {
+        m_k = powf(m0, k + 1);
+    } else {
+        m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
+    }
+
+    dst[i] = col * m_k + x[i];
+}
+
+static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past) {
+    const int col = blockDim.y*blockIdx.y + threadIdx.y;
+    const int row = blockDim.x*blockIdx.x + threadIdx.x;
 
     if (col >= ncols) {
         return;
@@ -3955,24 +3979,29 @@ static __global__ void diag_mask_inf_f32(const float * x, float * dst, const int
 
 // the CUDA soft max implementation differs from the CPU implementation
 // instead of doubles floats are used
-// values are also not normalized to the maximum value by subtracting it in the exponential function
-// theoretically these changes could cause problems with rounding error and arithmetic overflow but for LLaMa it seems to be fine
 static __global__ void soft_max_f32(const float * x, float * dst, const int ncols) {
-    const int row = blockDim.y*blockIdx.y + threadIdx.y;
-    const int block_size = blockDim.x;
-    const int tid = threadIdx.x;
-
-    float tmp = 0.0;
-
-    for (int block_start = 0; block_start < ncols; block_start += block_size) {
-        const int col = block_start + tid;
-
-        if (col >= ncols) {
-            break;
-        }
-
+    const int row = blockDim.x*blockIdx.x + threadIdx.x;
+    const int block_size = blockDim.y;
+    const int tid = threadIdx.y;
+
+    float max_val = -INFINITY;
+
+    for (int col = tid; col < ncols; col += block_size) {
+        const int i = row*ncols + col;
+        max_val = max(max_val, x[i]);
+    }
+
+    // find the max value in the block
+    #pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        max_val = max(max_val, __shfl_xor_sync(0xffffffff, max_val, mask, 32));
+    }
+
+    float tmp = 0.f;
+
+    for (int col = tid; col < ncols; col += block_size) {
         const int i = row*ncols + col;
-        const float val = expf(x[i]);
+        const float val = expf(x[i] - max_val);
         tmp += val;
         dst[i] = val;
     }
@@ -3983,15 +4012,11 @@ static __global__ void soft_max_f32(const float * x, float * dst, const int ncol
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
     }
 
-    for (int block_start = 0; block_start < ncols; block_start += block_size) {
-        const int col = block_start + tid;
-
-        if (col >= ncols) {
-            break;
-        }
-
+    const float inv_tmp = 1.f / tmp;
+
+    for (int col = tid; col < ncols; col += block_size) {
         const int i = row*ncols + col;
-        dst[i] /= tmp;
+        dst[i] *= inv_tmp;
     }
 }
 
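Note (ours): subtracting the per-row maximum before exponentiation leaves the softmax unchanged while keeping expf() from overflowing:

$$\mathrm{softmax}(x)_i \;=\; \frac{e^{x_i}}{\sum_j e^{x_j}} \;=\; \frac{e^{x_i - m}}{\sum_j e^{x_j - m}}, \qquad m = \max_j x_j .$$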
@@ -4752,9 +4777,9 @@ static void scale_f32_cuda(const float * x, float * dst, const float scale, cons
 static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p0,
                           const float p_delta, const int p_delta_rows, const float theta_scale, cudaStream_t stream) {
     GGML_ASSERT(nrows % 2 == 0);
-    const dim3 block_dims(2*CUDA_ROPE_BLOCK_SIZE, 1, 1);
+    const dim3 block_dims(1, 2*CUDA_ROPE_BLOCK_SIZE, 1);
     const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
-    const dim3 block_nums(num_blocks_x, nrows, 1);
+    const dim3 block_nums(nrows, num_blocks_x, 1);
     rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p0, p_delta, p_delta_rows, theta_scale);
 }
 
@@ -4766,16 +4791,25 @@ static void rope_glm_f32_cuda(const float * x, float * dst, const int ncols, con
     rope_glm_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p, block_p, theta_scale);
 }
 
+static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
+                           const int k_rows, const int n_heads_log2_floor, const float m0,
+                           const float m1, cudaStream_t stream) {
+    const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
+    const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE);
+    const dim3 block_nums(num_blocks_x, nrows, 1);
+    alibi_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
+}
+
 static void diag_mask_inf_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, const int rows_per_channel, const int n_past, cudaStream_t stream) {
-    const dim3 block_dims(CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1, 1);
+    const dim3 block_dims(1, CUDA_DIAG_MASK_INF_BLOCK_SIZE, 1);
     const int block_num_x = (ncols_x + CUDA_DIAG_MASK_INF_BLOCK_SIZE - 1) / CUDA_DIAG_MASK_INF_BLOCK_SIZE;
-    const dim3 block_nums(block_num_x, nrows_x, 1);
+    const dim3 block_nums(nrows_x, block_num_x, 1);
     diag_mask_inf_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x, rows_per_channel, n_past);
 }
 
 static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, const int nrows_x, cudaStream_t stream) {
-    const dim3 block_dims(WARP_SIZE, 1, 1);
-    const dim3 block_nums(1, nrows_x, 1);
+    const dim3 block_dims(1, WARP_SIZE, 1);
+    const dim3 block_nums(nrows_x, 1, 1);
     soft_max_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x);
 }
 
@@ -5501,6 +5535,41 @@ inline void ggml_cuda_op_rope(
     (void) i1;
 }
 
+inline void ggml_cuda_op_alibi(
+    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
+    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
+    cudaStream_t & cudaStream_main){
+
+    GGML_ASSERT(src0_ddf_i != nullptr);
+    GGML_ASSERT(dst_ddf_i  != nullptr);
+
+    const int64_t ne00 = src0->ne[0];
+    const int64_t ne01 = src0->ne[1];
+    const int64_t ne02 = src0->ne[2];
+    const int64_t i01_diff = i01_high - i01_low;
+
+    const int n_past = ((int32_t *) dst->op_params)[0];
+    const int n_head = ((int32_t *) dst->op_params)[1];
+    float max_bias;
+    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
+
+    GGML_ASSERT(ne01 + n_past == ne00);
+    GGML_ASSERT(n_head == ne02);
+
+    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
+
+    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
+    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
+
+    // compute
+    alibi_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_heads_log2_floor, m0, m1, cudaStream_main);
+
+    (void) src1;
+    (void) src0_ddq_i;
+    (void) src1_ddf_i;
+    (void) i1;
+}
+
 inline void ggml_cuda_op_diag_mask_inf(
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
     float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
|
|||||||
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, !is_glm); // flatten support not implemented for glm
|
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true, !is_glm); // flatten support not implemented for glm
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
|
||||||
|
GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
|
||||||
|
ggml_cuda_op(src0, src1, dst, ggml_cuda_op_alibi, true, true);
|
||||||
|
}
|
||||||
|
|
||||||
void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
|
void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
|
||||||
(void) src0;
|
(void) src0;
|
||||||
(void) src1;
|
(void) src1;
|
||||||
@ -6240,7 +6314,7 @@ static struct ggml_tensor_extra_gpu * ggml_cuda_alloc_temp_tensor_extra() {
|
|||||||
return extra;
|
return extra;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace) {
|
void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bool force_inplace, bool no_alloc) {
|
||||||
if (scratch && g_scratch_size == 0) {
|
if (scratch && g_scratch_size == 0) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -6249,14 +6323,19 @@ void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bo
|
|||||||
if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
|
if (tensor->src[0] != nullptr && tensor->src[0]->backend == GGML_BACKEND_CPU) {
|
||||||
const ggml_op src0_op = tensor->src[0]->op;
|
const ggml_op src0_op = tensor->src[0]->op;
|
||||||
if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
|
if (src0_op == GGML_OP_RESHAPE || src0_op == GGML_OP_TRANSPOSE || src0_op == GGML_OP_VIEW || src0_op == GGML_OP_PERMUTE) {
|
||||||
ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace);
|
ggml_cuda_assign_buffers_impl(tensor->src[0], scratch, force_inplace, no_alloc);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
|
if (tensor->op == GGML_OP_CPY && tensor->src[1]->backend == GGML_BACKEND_CPU) {
|
||||||
ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace);
|
ggml_cuda_assign_buffers_impl(tensor->src[1], scratch, force_inplace, no_alloc);
|
||||||
}
|
}
|
||||||
|
|
||||||
tensor->backend = GGML_BACKEND_GPU;
|
tensor->backend = GGML_BACKEND_GPU;
|
||||||
|
|
||||||
|
if (scratch && no_alloc) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
struct ggml_tensor_extra_gpu * extra;
|
struct ggml_tensor_extra_gpu * extra;
|
||||||
|
|
||||||
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
|
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
|
||||||
@ -6308,16 +6387,48 @@ void ggml_cuda_assign_buffers_impl(struct ggml_tensor * tensor, bool scratch, bo
|
|||||||
tensor->extra = extra;
|
tensor->extra = extra;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset) {
|
||||||
|
if (g_scratch_size == 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (g_scratch_buffer == nullptr) {
|
||||||
|
CUDA_CHECK(cudaMalloc(&g_scratch_buffer, g_scratch_size));
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ggml_tensor_extra_gpu * extra = ggml_cuda_alloc_temp_tensor_extra();
|
||||||
|
|
||||||
|
const bool inplace = (tensor->src[0] != nullptr && tensor->src[0]->data == tensor->data) ||
|
||||||
|
tensor->op == GGML_OP_VIEW;
|
||||||
|
|
||||||
|
if (inplace && (tensor->src[0]->backend == GGML_BACKEND_GPU || tensor->src[0]->backend == GGML_BACKEND_GPU_SPLIT)) {
|
||||||
|
struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src[0]->extra;
|
||||||
|
char * src0_ddc = (char *) src0_extra->data_device[g_main_device];
|
||||||
|
size_t view_offset = 0;
|
||||||
|
if (tensor->op == GGML_OP_VIEW) {
|
||||||
|
memcpy(&view_offset, tensor->op_params, sizeof(size_t));
|
||||||
|
}
|
||||||
|
extra->data_device[g_main_device] = src0_ddc + view_offset;
|
||||||
|
} else {
|
||||||
|
extra->data_device[g_main_device] = (char *) g_scratch_buffer + offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
tensor->extra = extra;
|
||||||
|
}
|
||||||
|
|
||||||
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
|
void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
|
||||||
ggml_cuda_assign_buffers_impl(tensor, true, false);
|
ggml_cuda_assign_buffers_impl(tensor, true, false, false);
|
||||||
|
}
|
||||||
|
|
||||||
|
void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor) {
|
||||||
|
ggml_cuda_assign_buffers_impl(tensor, true, false, true);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) {
|
void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor) {
|
||||||
ggml_cuda_assign_buffers_impl(tensor, false, false);
|
ggml_cuda_assign_buffers_impl(tensor, false, false, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) {
|
void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor) {
|
||||||
ggml_cuda_assign_buffers_impl(tensor, false, true);
|
ggml_cuda_assign_buffers_impl(tensor, false, true, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
void ggml_cuda_set_main_device(int main_device) {
|
void ggml_cuda_set_main_device(int main_device) {
|
||||||
@ -6456,6 +6567,12 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
|
|||||||
}
|
}
|
||||||
func = ggml_cuda_rope;
|
func = ggml_cuda_rope;
|
||||||
break;
|
break;
|
||||||
|
case GGML_OP_ALIBI:
|
||||||
|
if (!any_on_device) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
func = ggml_cuda_alibi;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@@ -16,9 +16,14 @@ GGML_API bool   ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const str
 GGML_API void ggml_cuda_set_tensor_split(const float * tensor_split);
 GGML_API void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);
 GGML_API void ggml_cuda_free_data(struct ggml_tensor * tensor);
 
 GGML_API void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
 GGML_API void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
 GGML_API void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);
+
+GGML_API void ggml_cuda_assign_buffers_no_alloc(struct ggml_tensor * tensor);
+GGML_API void ggml_cuda_assign_scratch_offset(struct ggml_tensor * tensor, size_t offset);
+
 GGML_API void ggml_cuda_set_main_device(int main_device);
 GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q);
 GGML_API void ggml_cuda_set_scratch_size(size_t scratch_size);
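Aside (ours): a hypothetical call order for the two new entry points above; the sizes and offsets here are made up and would normally come from a graph-measuring allocator rather than being hard-coded:

    #include "ggml.h"
    #include "ggml-cuda.h"

    static void place_in_scratch(struct ggml_tensor * t0, struct ggml_tensor * t1) {
        ggml_cuda_set_scratch_size(512u*1024*1024);               // reserve the scratch pool once

        ggml_cuda_assign_buffers_no_alloc(t0);                    // mark as GPU tensors, defer allocation
        ggml_cuda_assign_buffers_no_alloc(t1);

        ggml_cuda_assign_scratch_offset(t0, 0);                   // then pin each tensor at an explicit offset
        ggml_cuda_assign_scratch_offset(t1, ggml_nbytes_pad(t0));
    }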
@@ -38,6 +38,9 @@ struct ggml_metal_context;
 struct ggml_metal_context * ggml_metal_init(int n_cb);
 void ggml_metal_free(struct ggml_metal_context * ctx);
 
+void * ggml_metal_host_malloc(size_t n);
+void   ggml_metal_host_free  (void * data);
+
 // set the number of command buffers to use
 void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb);
 

ggml-metal.m (15 lines changed):

@@ -237,6 +237,21 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
     free(ctx);
 }
 
+void * ggml_metal_host_malloc(size_t n) {
+    void * data = NULL;
+    const int result = posix_memalign((void **) &data, getpagesize(), n);
+    if (result != 0) {
+        fprintf(stderr, "%s: error: posix_memalign failed\n", __func__);
+        return NULL;
+    }
+
+    return data;
+}
+
+void ggml_metal_host_free(void * data) {
+    free(data);
+}
+
 void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
     ctx->n_cb = n_cb;
 }
@@ -1850,6 +1850,7 @@ kernel void kernel_mul_mm(device const uchar * src0,
         //load data and store to threadgroup memory
         half4x4 temp_a;
         dequantize_func(x, il, temp_a);
+        threadgroup_barrier(mem_flags::mem_threadgroup);
         #pragma unroll(16)
         for (int i = 0; i < 16; i++) {
             *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
@@ -1895,14 +1896,14 @@ kernel void kernel_mul_mm(device const uchar * src0,
         }
     } else {
         // block is smaller than 64x32, we should avoid writing data outside of the matrix
+        threadgroup_barrier(mem_flags::mem_threadgroup);
        threadgroup float *temp_str = ((threadgroup float *)shared_memory) \
                                      + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
        for (int i = 0; i < 8; i++) {
-            threadgroup_barrier(mem_flags::mem_device);
            simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
        }
 
-        threadgroup_barrier(mem_flags::mem_device);
+        threadgroup_barrier(mem_flags::mem_threadgroup);
        device float *C = dst + BLOCK_SIZE_M * r0 + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
        if (sgitg==0) {
            for (int i = 0; i < n_rows; i++) {
ggml.h (238 lines changed):

@@ -207,7 +207,7 @@
 #define GGML_MAX_PARAMS        256
 #define GGML_MAX_CONTEXTS      64
 #define GGML_MAX_SRC           6
-#define GGML_MAX_NAME          48
+#define GGML_MAX_NAME          64
 #define GGML_MAX_OP_PARAMS     32
 #define GGML_DEFAULT_N_THREADS 4
 
@@ -215,6 +215,11 @@
 #define GGML_EXIT_SUCCESS 0
 #define GGML_EXIT_ABORTED 1
 
+#define GGUF_MAGIC   0x46554747 // "GGUF"
+#define GGUF_VERSION 1
+
+#define GGUF_DEFAULT_ALIGNMENT 32
+
 #define GGML_UNUSED(x) (void)(x)
 
 #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))
@@ -255,8 +260,9 @@
 extern "C" {
 #endif
 
-#ifdef __ARM_NEON
-    // we use the built-in 16-bit float type
+#if defined(__ARM_NEON) && defined(__CUDACC__)
+    typedef half ggml_fp16_t;
+#elif defined(__ARM_NEON)
     typedef __fp16 ggml_fp16_t;
 #else
     typedef uint16_t ggml_fp16_t;
@@ -340,10 +346,12 @@ extern "C" {
         GGML_OP_ARGMAX,
         GGML_OP_REPEAT,
         GGML_OP_REPEAT_BACK,
+        GGML_OP_CONCAT,
         GGML_OP_SILU_BACK,
         GGML_OP_NORM, // normalize
         GGML_OP_RMS_NORM,
         GGML_OP_RMS_NORM_BACK,
+        GGML_OP_GROUP_NORM,
 
         GGML_OP_MUL_MAT,
         GGML_OP_OUT_PROD,
@@ -369,14 +377,19 @@ extern "C" {
         GGML_OP_CLAMP,
         GGML_OP_CONV_1D,
         GGML_OP_CONV_2D,
+        GGML_OP_CONV_TRANSPOSE_2D,
         GGML_OP_POOL_1D,
         GGML_OP_POOL_2D,
 
+        GGML_OP_UPSCALE, // nearest interpolate
+
         GGML_OP_FLASH_ATTN,
         GGML_OP_FLASH_FF,
         GGML_OP_FLASH_ATTN_BACK,
         GGML_OP_WIN_PART,
         GGML_OP_WIN_UNPART,
+        GGML_OP_GET_REL_POS,
+        GGML_OP_ADD_REL_POS,
 
         GGML_OP_UNARY,
 
@@ -562,6 +575,7 @@ extern "C" {
     GGML_API int64_t ggml_nelements   (const struct ggml_tensor * tensor);
     GGML_API int64_t ggml_nrows       (const struct ggml_tensor * tensor);
    GGML_API size_t  ggml_nbytes      (const struct ggml_tensor * tensor);
+    GGML_API size_t  ggml_nbytes_pad  (const struct ggml_tensor * tensor); // same as ggml_nbytes() but padded to GGML_MEM_ALIGN
    GGML_API size_t  ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split);
 
    GGML_API int     ggml_blck_size (enum ggml_type type);
@@ -799,6 +813,13 @@ extern "C" {
             struct ggml_tensor  * a,
             struct ggml_tensor  * b);
 
+    // concat a and b on dim 2
+    // used in stable-diffusion
+    GGML_API struct ggml_tensor * ggml_concat(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b);
+
     GGML_API struct ggml_tensor * ggml_abs(
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
@@ -907,6 +928,19 @@ extern "C" {
             struct ggml_tensor  * a,
             float                 eps);
 
+    // group normalize along ne0*ne1*n_groups
+    // used in stable-diffusion
+    // TODO: eps is hardcoded to 1e-6 for now
+    GGML_API struct ggml_tensor * ggml_group_norm(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   n_groups);
+
+    GGML_API struct ggml_tensor * ggml_group_norm_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   n_groups);
+
     // a - x
     // b - dy
     // TODO: update with configurable eps
@@ -1207,6 +1241,15 @@ extern "C" {
             float                 freq_base,
             float                 freq_scale);
 
+    // xPos RoPE, in-place, returns view(a)
+    GGML_API struct ggml_tensor * ggml_rope_xpos_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            int                   n_past,
+            int                   n_dims,
+            float                 base,
+            bool                  down);
+
     // rotary position embedding backward, i.e compute dx from dy
     // a - dy
     GGML_API struct ggml_tensor * ggml_rope_back(
@@ -1215,7 +1258,11 @@ extern "C" {
             int                   n_past,
             int                   n_dims,
             int                   mode,
-            int                   n_ctx);
+            int                   n_ctx,
+            float                 freq_base,
+            float                 freq_scale,
+            float                 xpos_base,
+            bool                  xpos_down);
 
     // alibi position embedding
     // in-place, returns view(a)
@@ -1242,6 +1289,15 @@ extern "C" {
             int                   p0, // padding
             int                   d0); // dilation
 
+    // conv_1d with padding = half
+    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
+    GGML_API struct ggml_tensor* ggml_conv_1d_ph(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b,
+            int                   s,
+            int                   d);
+
     GGML_API struct ggml_tensor * ggml_conv_2d(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
@@ -1253,14 +1309,38 @@ extern "C" {
             int                   d0,
             int                   d1);
 
-    // conv_1d with padding = half
-    // alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
-    GGML_API struct ggml_tensor * ggml_conv_1d_ph(
+    // kernel size is a->ne[0] x a->ne[1]
+    // stride is equal to kernel size
+    // padding is zero
+    // example:
+    // a:     16   16    3  768
+    // b:   1024 1024    3    1
+    // res:   64   64  768    1
+    // used in sam
+    GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b);
+
+    // kernel size is a->ne[0] x a->ne[1]
+    // stride is 1
+    // padding is half
+    // example:
+    // a:      3    3    256  256
+    // b:     64   64    256    1
+    // res:   64   64    256    1
+    // used in sam
+    GGML_API struct ggml_tensor * ggml_conv_2d_s1_ph(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a,
+            struct ggml_tensor  * b);
+
+    GGML_API struct ggml_tensor * ggml_conv_transpose_2d_p0(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
             struct ggml_tensor  * b,
-            int                   s,
-            int                   d);
+            int                   stride);
 
     enum ggml_op_pool {
         GGML_OP_POOL_MAX,
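Worked example (ours) for the two new 2-D convolutions declared above. For ggml_conv_2d_sk_p0 the stride equals the kernel size and the padding is zero, so the spatial output is

$$W_{out} = \frac{W_{in}}{K_w}, \qquad H_{out} = \frac{H_{in}}{K_h},$$

which reproduces the shape in the comment (a 16x16 kernel over a 1024x1024 input gives 64x64). For ggml_conv_2d_s1_ph the stride is 1 and the padding is half the kernel, so the spatial size of the input is preserved (64x64 stays 64x64).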
@ -1287,6 +1367,13 @@ extern "C" {
|
|||||||
int p0,
|
int p0,
|
||||||
int p1);
|
int p1);
|
||||||
|
|
||||||
|
// nearest interpolate
|
||||||
|
// used in stable-diffusion
|
||||||
|
GGML_API struct ggml_tensor * ggml_upscale(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a,
|
||||||
|
int scale_factor);
|
||||||
|
|
||||||
GGML_API struct ggml_tensor * ggml_flash_attn(
|
GGML_API struct ggml_tensor * ggml_flash_attn(
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * q,
|
struct ggml_tensor * q,
|
||||||
@ -1340,6 +1427,27 @@ extern "C" {
|
|||||||
struct ggml_tensor * a,
|
struct ggml_tensor * a,
|
||||||
enum ggml_unary_op op);
|
enum ggml_unary_op op);
|
||||||
|
|
||||||
|
// used in sam
|
||||||
|
GGML_API struct ggml_tensor * ggml_get_rel_pos(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a,
|
||||||
|
int qh,
|
||||||
|
int kh);
|
||||||
|
|
||||||
|
// used in sam
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_add_rel_pos(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a,
|
||||||
|
struct ggml_tensor * pw,
|
||||||
|
struct ggml_tensor * ph);
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_add_rel_pos_inplace(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a,
|
||||||
|
struct ggml_tensor * pw,
|
||||||
|
struct ggml_tensor * ph);
|
||||||
|
|
||||||
// custom operators
|
// custom operators
|
||||||
|
|
||||||
typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
|
typedef void (*ggml_unary_op_f32_t) (const int, float *, const float *);
|
||||||
@ -1703,6 +1811,118 @@ extern "C" {
|
|||||||
|
|
||||||
GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
|
GGML_API size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, int start, int n, int64_t * hist);
|
||||||
|
|
||||||
|
//
|
||||||
|
// gguf
|
||||||
|
//
|
||||||
|
|
||||||
|
enum gguf_type {
|
||||||
|
GGUF_TYPE_UINT8 = 0,
|
||||||
|
GGUF_TYPE_INT8 = 1,
|
||||||
|
GGUF_TYPE_UINT16 = 2,
|
||||||
|
GGUF_TYPE_INT16 = 3,
|
||||||
|
GGUF_TYPE_UINT32 = 4,
|
||||||
|
GGUF_TYPE_INT32 = 5,
|
||||||
|
GGUF_TYPE_FLOAT32 = 6,
|
||||||
|
GGUF_TYPE_BOOL = 7,
|
||||||
|
GGUF_TYPE_STRING = 8,
|
||||||
|
GGUF_TYPE_ARRAY = 9,
|
||||||
|
GGUF_TYPE_COUNT, // marks the end of the enum
|
||||||
|
};
|
||||||
|
|
||||||
|
struct gguf_context;
|
||||||
|
|
||||||
|
struct gguf_init_params {
|
||||||
|
bool no_alloc;
|
||||||
|
|
||||||
|
// if not NULL, create a ggml_context and allocate the tensor data in it
|
||||||
|
struct ggml_context ** ctx;
|
||||||
|
};
|
||||||
|
|
||||||
|
GGML_API struct gguf_context * gguf_init_empty(void);
|
||||||
|
GGML_API struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params);
|
||||||
|
//GGML_API struct gguf_context * gguf_init_from_buffer(..);
|
||||||
|
|
||||||
|
GGML_API void gguf_free(struct gguf_context * ctx);
|
||||||
|
|
||||||
|
GGML_API const char * gguf_type_name(enum gguf_type type);
|
||||||
|
|
||||||
|
GGML_API int gguf_get_version (struct gguf_context * ctx);
|
||||||
|
GGML_API size_t gguf_get_alignment (struct gguf_context * ctx);
|
||||||
|
GGML_API size_t gguf_get_data_offset(struct gguf_context * ctx);
|
||||||
|
GGML_API void * gguf_get_data (struct gguf_context * ctx);
|
||||||
|
|
||||||
|
GGML_API int gguf_get_n_kv(struct gguf_context * ctx);
|
||||||
|
GGML_API int gguf_find_key(struct gguf_context * ctx, const char * key);
|
||||||
|
GGML_API const char * gguf_get_key (struct gguf_context * ctx, int i);
|
||||||
|
|
||||||
|
GGML_API enum gguf_type gguf_get_kv_type (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API enum gguf_type gguf_get_arr_type(struct gguf_context * ctx, int i);
|
||||||
|
|
||||||
|
// results are undefined if the wrong type is used for the key
|
||||||
|
GGML_API uint8_t gguf_get_val_u8 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API int8_t gguf_get_val_i8 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API uint16_t gguf_get_val_u16 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API int16_t gguf_get_val_i16 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API uint32_t gguf_get_val_u32 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API int32_t gguf_get_val_i32 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API float gguf_get_val_f32 (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API bool gguf_get_val_bool(struct gguf_context * ctx, int i);
|
||||||
|
GGML_API const char * gguf_get_val_str (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API int gguf_get_arr_n (struct gguf_context * ctx, int i);
|
||||||
|
GGML_API const void * gguf_get_arr_data(struct gguf_context * ctx, int i);
|
||||||
|
GGML_API const char * gguf_get_arr_str (struct gguf_context * ctx, int key_id, int i);
|
||||||
|
|
||||||
|
GGML_API int gguf_get_n_tensors (struct gguf_context * ctx);
|
||||||
|
GGML_API int gguf_find_tensor (struct gguf_context * ctx, const char * name);
|
||||||
|
GGML_API size_t gguf_get_tensor_offset(struct gguf_context * ctx, int i);
|
||||||
|
GGML_API char * gguf_get_tensor_name (struct gguf_context * ctx, int i);
|
||||||
|
|
||||||
|
// overrides existing values or adds a new one
|
||||||
|
GGML_API void gguf_set_val_u8 (struct gguf_context * ctx, const char * key, uint8_t val);
|
||||||
|
GGML_API void gguf_set_val_i8 (struct gguf_context * ctx, const char * key, int8_t val);
|
||||||
|
GGML_API void gguf_set_val_u16 (struct gguf_context * ctx, const char * key, uint16_t val);
|
||||||
|
GGML_API void gguf_set_val_i16 (struct gguf_context * ctx, const char * key, int16_t val);
|
||||||
|
GGML_API void gguf_set_val_u32 (struct gguf_context * ctx, const char * key, uint32_t val);
|
||||||
|
GGML_API void gguf_set_val_i32 (struct gguf_context * ctx, const char * key, int32_t val);
|
||||||
|
GGML_API void gguf_set_val_f32 (struct gguf_context * ctx, const char * key, float val);
|
||||||
|
GGML_API void gguf_set_val_bool(struct gguf_context * ctx, const char * key, bool val);
|
||||||
|
GGML_API void gguf_set_val_str (struct gguf_context * ctx, const char * key, const char * val);
|
||||||
|
GGML_API void gguf_set_arr_data(struct gguf_context * ctx, const char * key, enum gguf_type type, const void * data, int n);
|
||||||
|
GGML_API void gguf_set_arr_str (struct gguf_context * ctx, const char * key, const char ** data, int n);
|
||||||
|
|
||||||
|
// set or add KV pairs from another context
|
||||||
|
GGML_API void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src);
|
||||||
|
|
||||||
|
// manage tensor info
|
||||||
|
GGML_API void gguf_add_tensor(struct gguf_context * ctx, const struct ggml_tensor * tensor);
|
||||||
|
GGML_API void gguf_set_tensor_type(struct gguf_context * ctx, const char * name, enum ggml_type type);
|
||||||
|
GGML_API void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const void * data, size_t size);
|
||||||
|
|
||||||
|
// writing gguf files can be done in 2 ways:
//
// - write the entire gguf_context to a binary file in a single pass:
//
//   gguf_write_to_file(ctx, fname);
//
// - first prepare a file with a placeholder for the meta data, write the tensor data, then write the meta data:
//
//   FILE * f = fopen(fname, "wb");
//   fseek(f, gguf_get_meta_size(ctx), SEEK_SET);
//   fwrite(f, ...);
//   void * data = malloc(gguf_get_meta_size(ctx));
//   gguf_get_meta_data(ctx, data);
//   fseek(f, 0, SEEK_SET);
//   fwrite(data, 1, gguf_get_meta_size(ctx), f);
//   free(data);
//   fclose(f);
//
|
||||||
|
|
||||||
|
// write the entire context to a binary file
|
||||||
|
GGML_API void gguf_write_to_file(struct gguf_context * ctx, const char * fname, bool only_meta);
|
||||||
|
|
||||||
|
// get the size in bytes of the meta data (header, kv pairs, tensor info) including padding
|
||||||
|
GGML_API size_t gguf_get_meta_size(struct gguf_context * ctx);
|
||||||
|
GGML_API void gguf_get_meta_data(struct gguf_context * ctx, void * data);
|
||||||
|
|
||||||
//
// system info
//
||||||
|
723
gguf.py
Executable file
@@ -0,0 +1,723 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
import shutil
|
||||||
|
import sys
|
||||||
|
import struct
|
||||||
|
import tempfile
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
from enum import IntEnum, auto
|
||||||
|
from typing import Any, IO, List, Optional
|
||||||
|
|
||||||
|
#
|
||||||
|
# constants
|
||||||
|
#
|
||||||
|
|
||||||
|
GGUF_MAGIC = 0x46554747
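# note: GGUF_MAGIC is the ASCII bytes "GGUF" when packed little-endian ("<I"), as done in write_header_to_file()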
|
||||||
|
GGUF_VERSION = 1
|
||||||
|
GGUF_DEFAULT_ALIGNMENT = 32
|
||||||
|
|
||||||
|
# general
|
||||||
|
KEY_GENERAL_ARCHITECTURE = "general.architecture"
|
||||||
|
KEY_GENERAL_QUANTIZATION_VERSION = "general.quantization_version"
|
||||||
|
KEY_GENERAL_ALIGNMENT = "general.alignment"
|
||||||
|
KEY_GENERAL_NAME = "general.name"
|
||||||
|
KEY_GENERAL_AUTHOR = "general.author"
|
||||||
|
KEY_GENERAL_URL = "general.url"
|
||||||
|
KEY_GENERAL_DESCRIPTION = "general.description"
|
||||||
|
KEY_GENERAL_LICENSE = "general.license"
|
||||||
|
KEY_GENERAL_SOURCE_URL = "general.source.url"
|
||||||
|
KEY_GENERAL_SOURCE_HF_REPO = "general.source.hugginface.repository"
|
||||||
|
KEY_GENERAL_FILE_TYPE = "general.file_type"
|
||||||
|
|
||||||
|
# LLM
|
||||||
|
KEY_LLM_CONTEXT_LENGTH = "{arch}.context_length"
|
||||||
|
KEY_LLM_EMBEDDING_LENGTH = "{arch}.embedding_length"
|
||||||
|
KEY_LLM_BLOCK_COUNT = "{arch}.block_count"
|
||||||
|
KEY_LLM_FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
|
||||||
|
KEY_LLM_USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
|
||||||
|
KEY_LLM_TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
|
||||||
|
|
||||||
|
# attention
|
||||||
|
KEY_ATTENTION_HEAD_COUNT = "{arch}.attention.head_count"
|
||||||
|
KEY_ATTENTION_HEAD_COUNT_KV = "{arch}.attention.head_count_kv"
|
||||||
|
KEY_ATTENTION_MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias"
|
||||||
|
KEY_ATTENTION_CLAMP_KQV = "{arch}.attention.clamp_kqv"
|
||||||
|
KEY_ATTENTION_LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
|
||||||
|
KEY_ATTENTION_LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
|
||||||
|
|
||||||
|
# RoPE
|
||||||
|
KEY_ROPE_DIMENSION_COUNT = "{arch}.rope.dimension_count"
|
||||||
|
KEY_ROPE_SCALE_LINEAR = "{arch}.rope.scale_linear"
|
||||||
|
|
||||||
|
# tokenization
|
||||||
|
KEY_TOKENIZER_MODEL = "tokenizer.ggml.model"
|
||||||
|
KEY_TOKENIZER_LIST = "tokenizer.ggml.tokens"
|
||||||
|
KEY_TOKENIZER_TOKEN_TYPE = "tokenizer.ggml.token_type"
|
||||||
|
KEY_TOKENIZER_SCORES = "tokenizer.ggml.scores"
|
||||||
|
KEY_TOKENIZER_MERGES = "tokenizer.ggml.merges"
|
||||||
|
KEY_TOKENIZER_BOS_ID = "tokenizer.ggml.bos_token_id"
|
||||||
|
KEY_TOKENIZER_EOS_ID = "tokenizer.ggml.eos_token_id"
|
||||||
|
KEY_TOKENIZER_UNK_ID = "tokenizer.ggml.unknown_token_id"
|
||||||
|
KEY_TOKENIZER_SEP_ID = "tokenizer.ggml.seperator_token_id"
|
||||||
|
KEY_TOKENIZER_PAD_ID = "tokenizer.ggml.padding_token_id"
|
||||||
|
KEY_TOKENIZER_HF_JSON = "tokenizer.huggingface.json"
|
||||||
|
KEY_TOKENIZER_RWKV = "tokenizer.rwkv.world"
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# recommended mapping of model tensor names for storage in gguf
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
class MODEL_ARCH(IntEnum):
|
||||||
|
LLAMA = auto()
|
||||||
|
FALCON = auto()
|
||||||
|
GPT2 = auto()
|
||||||
|
GPTJ = auto()
|
||||||
|
GPTNEOX = auto()
|
||||||
|
MPT = auto()
|
||||||
|
|
||||||
|
|
||||||
|
class MODEL_TENSOR(IntEnum):
|
||||||
|
TOKEN_EMBD = auto()
|
||||||
|
POS_EMBD = auto()
|
||||||
|
OUTPUT = auto()
|
||||||
|
OUTPUT_NORM = auto()
|
||||||
|
ROPE_FREQS = auto()
|
||||||
|
ATTN_Q = auto()
|
||||||
|
ATTN_K = auto()
|
||||||
|
ATTN_V = auto()
|
||||||
|
ATTN_QKV = auto()
|
||||||
|
ATTN_OUT = auto()
|
||||||
|
ATTN_NORM = auto()
|
||||||
|
ATTN_NORM_2 = auto()
|
||||||
|
ATTN_ROT_EMBD = auto()
|
||||||
|
FFN_GATE = auto()
|
||||||
|
FFN_DOWN = auto()
|
||||||
|
FFN_UP = auto()
|
||||||
|
FFN_NORM = auto()
|
||||||
|
|
||||||
|
|
||||||
|
MODEL_ARCH_NAMES = {
|
||||||
|
MODEL_ARCH.LLAMA: "llama",
|
||||||
|
MODEL_ARCH.FALCON: "falcon",
|
||||||
|
MODEL_ARCH.GPT2: "gpt2",
|
||||||
|
MODEL_ARCH.GPTJ: "gptj",
|
||||||
|
MODEL_ARCH.GPTNEOX: "gptneox",
|
||||||
|
MODEL_ARCH.MPT: "mpt",
|
||||||
|
}
|
||||||
|
|
||||||
|
MODEL_TENSOR_NAMES = {
|
||||||
|
MODEL_ARCH.LLAMA: {
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD: "token_embd",
|
||||||
|
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||||
|
MODEL_TENSOR.OUTPUT: "output",
|
||||||
|
MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
|
||||||
|
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||||
|
MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
|
||||||
|
MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
|
||||||
|
MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
|
||||||
|
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||||
|
MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
|
||||||
|
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
||||||
|
MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
|
||||||
|
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||||
|
MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
|
||||||
|
},
|
||||||
|
MODEL_ARCH.GPTNEOX: {
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD: "token_embd",
|
||||||
|
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||||
|
MODEL_TENSOR.OUTPUT: "output",
|
||||||
|
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||||
|
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
||||||
|
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||||
|
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
||||||
|
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||||
|
MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
|
||||||
|
},
|
||||||
|
MODEL_ARCH.FALCON: {
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD: "token_embd",
|
||||||
|
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||||
|
MODEL_TENSOR.OUTPUT: "output",
|
||||||
|
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||||
|
MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
|
||||||
|
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
||||||
|
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||||
|
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||||
|
MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
|
||||||
|
},
|
||||||
|
MODEL_ARCH.GPT2: {
|
||||||
|
# TODO
|
||||||
|
},
|
||||||
|
# TODO
|
||||||
|
}
|
||||||
|
|
||||||
|
# tensors that will not be serialized
|
||||||
|
MODEL_TENSOR_SKIP = {
|
||||||
|
MODEL_ARCH.LLAMA: [
|
||||||
|
MODEL_TENSOR.ROPE_FREQS,
|
||||||
|
MODEL_TENSOR.ATTN_ROT_EMBD,
|
||||||
|
],
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: the following helper functions should be removed
|
||||||
|
# instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR)
|
||||||
|
# however, my Python is very bad, and I couldn't figure out how to do this, hence these functions
|
||||||
|
# REMOVE
|
||||||
|
def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
|
||||||
|
for skip in MODEL_TENSOR_SKIP.get(arch, []):
|
||||||
|
for i in range(n_blocks):
|
||||||
|
if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i):
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_tensor_name_map(arch: MODEL_ARCH, n_blocks: int) -> dict:
|
||||||
|
tensor_map = {}
|
||||||
|
|
||||||
|
# Token embeddings
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.TOKEN_EMBD, None)
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.embed_in"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.wte"] = mapped_to # gpt2 mpt
|
||||||
|
tensor_map["transformer.word_embeddings"] = mapped_to # falcon
|
||||||
|
tensor_map["model.embed_tokens"] = mapped_to # llama-hf
|
||||||
|
tensor_map["tok_embeddings"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Position embeddings
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.POS_EMBD, None)
|
||||||
|
|
||||||
|
tensor_map["transformer.wpe"] = mapped_to # gpt2
|
||||||
|
|
||||||
|
# Output
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT, None)
|
||||||
|
|
||||||
|
tensor_map["embed_out"] = mapped_to # gptneox
|
||||||
|
tensor_map["lm_head"] = mapped_to # gpt2 mpt falcon llama-hf
|
||||||
|
tensor_map["output"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Output norm
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.OUTPUT_NORM, None)
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.final_layer_norm"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.ln_f"] = mapped_to # gpt2 falcon
|
||||||
|
tensor_map["transformer.norm_f"] = mapped_to # mpt
|
||||||
|
tensor_map["model.norm"] = mapped_to # llama-hf
|
||||||
|
tensor_map["norm"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Rope frequencies
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ROPE_FREQS, None)
|
||||||
|
|
||||||
|
tensor_map["rope.freqs"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Attention and feed-forward blocks
|
||||||
|
for i in range(0, n_blocks):
|
||||||
|
# Attention norm
|
||||||
|
# TODO: is there a simpler way to write these 2 lines in Python?
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".input_layernorm"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".ln_1"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".norm_1"] = mapped_to # mpt
|
||||||
|
tensor_map["transformer.h."+str(i)+".input_layernorm"] = mapped_to # falcon7b
|
||||||
|
tensor_map["transformer.h."+str(i)+".ln_mlp"] = mapped_to # falcon40b
|
||||||
|
tensor_map["model.layers."+str(i)+".input_layernorm"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention_norm"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Attention norm 2
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_NORM_2, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["transformer.h."+str(i)+".ln_attn"] = mapped_to # falcon40b
|
||||||
|
|
||||||
|
# Attention query-key-value
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_QKV, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".attention.query_key_value"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".attn.c_attn"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".attn.Wqkv"] = mapped_to # mpt
|
||||||
|
tensor_map["transformer.h."+str(i)+".self_attention.query_key_value"] = mapped_to # falcon
|
||||||
|
|
||||||
|
# Attention query
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_Q, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["model.layers."+str(i)+".self_attn.q_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention.wq"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Attention key
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_K, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["model.layers."+str(i)+".self_attn.k_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention.wk"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Attention value
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_V, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["model.layers."+str(i)+".self_attn.v_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention.wv"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Attention output
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_OUT, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".attention.dense"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".attn.c_proj"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".attn.out_proj"] = mapped_to # mpt
|
||||||
|
tensor_map["transformer.h."+str(i)+".self_attention.dense"] = mapped_to # falcon
|
||||||
|
tensor_map["model.layers."+str(i)+".self_attn.o_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention.wo"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Rotary embeddings
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.ATTN_ROT_EMBD, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["model.layers."+str(i)+".self_attn.rotary_emb.inv_freq"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".attention.inner_attention.rope.freqs"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Feed-forward norm
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_NORM, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".post_attention_layernorm"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".ln_2"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".norm_2"] = mapped_to # mpt
|
||||||
|
tensor_map["model.layers."+str(i)+".post_attention_layernorm"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".ffn_norm"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Feed-forward up
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_UP, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".mlp.c_fc"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".ffn.up_proj"] = mapped_to # mpt
|
||||||
|
tensor_map["transformer.h."+str(i)+".mlp.dense_h_to_4h"] = mapped_to # falcon
|
||||||
|
tensor_map["model.layers."+str(i)+".mlp.up_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".feed_forward.w3"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Feed-forward gate
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_GATE, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["model.layers."+str(i)+".mlp.gate_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".feed_forward.w1"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
# Feed-forward down
|
||||||
|
mapped_to = MODEL_TENSOR_NAMES[arch].get(MODEL_TENSOR.FFN_DOWN, None)
|
||||||
|
mapped_to = mapped_to.format(bid=i) if mapped_to is not None else None
|
||||||
|
|
||||||
|
tensor_map["gpt_neox.layers."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # gptneox
|
||||||
|
tensor_map["transformer.h."+str(i)+".mlp.c_proj"] = mapped_to # gpt2
|
||||||
|
tensor_map["transformer.blocks."+str(i)+".ffn.down_proj"] = mapped_to # mpt
|
||||||
|
tensor_map["transformer.h."+str(i)+".mlp.dense_4h_to_h"] = mapped_to # falcon
|
||||||
|
tensor_map["model.layers."+str(i)+".mlp.down_proj"] = mapped_to # llama-hf
|
||||||
|
tensor_map["layers."+str(i)+".feed_forward.w2"] = mapped_to # llama-pth
|
||||||
|
|
||||||
|
return tensor_map
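
# Editor's sketch (not part of the original gguf.py): the TODO above suggests having
# the map carry (gguf_name, MODEL_TENSOR) tuples so callers can test membership in
# MODEL_TENSOR_SKIP directly instead of calling should_skip_tensor_TMP. One
# hypothetical way to build that on top of the map above:
def get_tensor_name_map_typed(arch: MODEL_ARCH, n_blocks: int) -> dict:
    # invert MODEL_TENSOR_NAMES[arch] so a formatted gguf name maps back to its enum
    gguf_name_to_tensor = {}
    for tensor, fmt in MODEL_TENSOR_NAMES[arch].items():
        for bid in range(n_blocks):
            gguf_name_to_tensor[fmt.format(bid=bid)] = tensor

    typed_map = {}
    for src_name, mapped in get_tensor_name_map(arch, n_blocks).items():
        typed_map[src_name] = (mapped, gguf_name_to_tensor.get(mapped))
    return typed_map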
|
||||||
|
|
||||||
|
|
||||||
|
class TokenType(IntEnum):
|
||||||
|
NORMAL = 1
|
||||||
|
UNKNOWN = 2
|
||||||
|
CONTROL = 3
|
||||||
|
USER_DEFINED = 4
|
||||||
|
UNUSED = 5
|
||||||
|
BYTE = 6
|
||||||
|
|
||||||
|
#
|
||||||
|
# implementation
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
class GGMLQuantizationType(IntEnum):
|
||||||
|
F32 = 0
|
||||||
|
F16 = 1
|
||||||
|
Q4_0 = 2
|
||||||
|
Q4_1 = 3
|
||||||
|
Q5_0 = 6
|
||||||
|
Q5_1 = 7
|
||||||
|
Q8_0 = 8
|
||||||
|
Q8_1 = 9
|
||||||
|
Q2_K = 10
|
||||||
|
Q3_K = 11
|
||||||
|
Q4_K = 12
|
||||||
|
Q5_K = 13
|
||||||
|
Q6_K = 14
|
||||||
|
Q8_K = 15
|
||||||
|
|
||||||
|
|
||||||
|
class GGUFValueType(IntEnum):
|
||||||
|
UINT8 = 0
|
||||||
|
INT8 = 1
|
||||||
|
UINT16 = 2
|
||||||
|
INT16 = 3
|
||||||
|
UINT32 = 4
|
||||||
|
INT32 = 5
|
||||||
|
FLOAT32 = 6
|
||||||
|
BOOL = 7
|
||||||
|
STRING = 8
|
||||||
|
ARRAY = 9
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_type(val):
|
||||||
|
if isinstance(val, str) or isinstance(val, bytes) or isinstance(val, bytearray):
|
||||||
|
return GGUFValueType.STRING
|
||||||
|
elif isinstance(val, list):
|
||||||
|
return GGUFValueType.ARRAY
|
||||||
|
elif isinstance(val, float):
|
||||||
|
return GGUFValueType.FLOAT32
|
||||||
|
elif isinstance(val, bool):
|
||||||
|
return GGUFValueType.BOOL
|
||||||
|
elif isinstance(val, int):
|
||||||
|
return GGUFValueType.INT32
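# note: the bool check above runs before this int check, which matters because bool is a subclass of int in Python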
|
||||||
|
else:
|
||||||
|
print("Unknown type: "+str(type(val)))
|
||||||
|
sys.exit()
|
||||||
|
|
||||||
|
|
||||||
|
class GGUFWriter:
|
||||||
|
def __init__(self, path: str, arch: str, use_temp_file = True):
|
||||||
|
self.fout = open(path, "wb")
|
||||||
|
self.arch = arch
|
||||||
|
self.offset_tensor = 0
|
||||||
|
self.data_alignment = GGUF_DEFAULT_ALIGNMENT
|
||||||
|
self.kv_data = b""
|
||||||
|
self.kv_data_count = 0
|
||||||
|
self.ti_data = b""
|
||||||
|
self.ti_data_count = 0
|
||||||
|
self.add_architecture()
|
||||||
|
self.use_temp_file = use_temp_file
|
||||||
|
self.tensors = []
|
||||||
|
|
||||||
|
def write_header_to_file(self):
|
||||||
|
self.fout.write(struct.pack("<I", GGUF_MAGIC))
|
||||||
|
self.fout.write(struct.pack("<I", GGUF_VERSION))
|
||||||
|
self.fout.write(struct.pack("<I", self.ti_data_count))
|
||||||
|
self.fout.write(struct.pack("<I", self.kv_data_count))
|
||||||
|
self.flush()
|
||||||
|
# print("tensors " + str(self.ti_data_count) + " kv " + str(self.kv_data_count))
|
||||||
|
|
||||||
|
def write_kv_data_to_file(self):
|
||||||
|
self.fout.write(self.kv_data)
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
def write_ti_data_to_file(self):
|
||||||
|
self.fout.write(self.ti_data)
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
def add_key(self, key: str):
|
||||||
|
self.add_val(key, GGUFValueType.STRING, add_vtype=False)
|
||||||
|
|
||||||
|
def add_uint8(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.UINT8)
|
||||||
|
|
||||||
|
def add_int8(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.INT8)
|
||||||
|
|
||||||
|
def add_uint16(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.UINT16)
|
||||||
|
|
||||||
|
def add_int16(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.INT16)
|
||||||
|
|
||||||
|
def add_uint32(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.UINT32)
|
||||||
|
|
||||||
|
def add_int32(self, key: str, val: int):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.INT32)
|
||||||
|
|
||||||
|
def add_float32(self, key: str, val: float):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.FLOAT32)
|
||||||
|
|
||||||
|
def add_bool(self, key: str, val: bool):
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.BOOL)
|
||||||
|
|
||||||
|
def add_string(self, key: str, val: str):
|
||||||
|
if len(val) == 0:
|
||||||
|
return
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.STRING)
|
||||||
|
|
||||||
|
def add_array(self, key: str, val: list):
|
||||||
|
if not isinstance(val, list):
|
||||||
|
raise ValueError("Value must be a list for array type")
|
||||||
|
|
||||||
|
self.add_key(key)
|
||||||
|
self.add_val(val, GGUFValueType.ARRAY)
|
||||||
|
|
||||||
|
def add_val(self, val: Any, vtype: Optional[GGUFValueType] = None, add_vtype: bool = True):
|
||||||
|
if vtype is None:
|
||||||
|
vtype = GGUFValueType.get_type(val)
|
||||||
|
|
||||||
|
if add_vtype:
|
||||||
|
self.kv_data += struct.pack("<I", vtype)
|
||||||
|
self.kv_data_count += 1
|
||||||
|
|
||||||
|
if vtype == GGUFValueType.UINT8:
|
||||||
|
self.kv_data += struct.pack("<B", val)
|
||||||
|
elif vtype == GGUFValueType.INT8:
|
||||||
|
self.kv_data += struct.pack("<b", val)
|
||||||
|
elif vtype == GGUFValueType.UINT16:
|
||||||
|
self.kv_data += struct.pack("<H", val)
|
||||||
|
elif vtype == GGUFValueType.INT16:
|
||||||
|
self.kv_data += struct.pack("<h", val)
|
||||||
|
elif vtype == GGUFValueType.UINT32:
|
||||||
|
self.kv_data += struct.pack("<I", val)
|
||||||
|
elif vtype == GGUFValueType.INT32:
|
||||||
|
self.kv_data += struct.pack("<i", val)
|
||||||
|
elif vtype == GGUFValueType.FLOAT32:
|
||||||
|
self.kv_data += struct.pack("<f", val)
|
||||||
|
elif vtype == GGUFValueType.BOOL:
|
||||||
|
self.kv_data += struct.pack("?", val)
|
||||||
|
elif vtype == GGUFValueType.STRING:
|
||||||
|
encoded_val = val.encode("utf8") if isinstance(val, str) else val
|
||||||
|
self.kv_data += struct.pack("<I", len(encoded_val))
|
||||||
|
self.kv_data += encoded_val
|
||||||
|
elif vtype == GGUFValueType.ARRAY:
|
||||||
|
ltype = set([GGUFValueType.get_type(item) for item in val])
|
||||||
|
assert len(ltype) == 1, "All items in a GGUF array should be of the same type"
|
||||||
|
self.kv_data += struct.pack("<I", list(ltype)[0])
|
||||||
|
self.kv_data += struct.pack("<I", len(val))
|
||||||
|
for item in val:
|
||||||
|
self.add_val(item, add_vtype=False)
|
||||||
|
else:
|
||||||
|
raise ValueError("Invalid GGUF metadata value type")
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def ggml_pad(x: int, n: int) -> int:
|
||||||
|
return ((x + n - 1) // n) * n
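# e.g. ggml_pad(100, 32) == 128, ggml_pad(128, 32) == 128 (already a multiple of 32)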
|
||||||
|
|
||||||
|
def add_tensor_info(self, name: str, tensor_shape: np.ndarray, tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: Optional[GGMLQuantizationType] = None):
|
||||||
|
assert raw_dtype is not None or tensor_dtype in (np.float32, np.float16), "Only F32 and F16 tensors are supported for now"
|
||||||
|
|
||||||
|
encoded_name = name.encode("utf8")
|
||||||
|
self.ti_data += struct.pack("<I", len(encoded_name))
|
||||||
|
self.ti_data += encoded_name
|
||||||
|
n_dims = len(tensor_shape)
|
||||||
|
self.ti_data += struct.pack("<I", n_dims)
|
||||||
|
for i in range(n_dims):
|
||||||
|
self.ti_data += struct.pack("<I", tensor_shape[n_dims - 1 - i])
|
||||||
|
if raw_dtype is None:
|
||||||
|
dtype = GGMLQuantizationType.F32 if tensor_dtype == np.float32 else GGMLQuantizationType.F16
|
||||||
|
else:
|
||||||
|
dtype = raw_dtype
|
||||||
|
self.ti_data += struct.pack("<I", dtype)
|
||||||
|
self.ti_data += struct.pack("<Q", self.offset_tensor)
|
||||||
|
self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment)
|
||||||
|
self.ti_data_count += 1
|
||||||
|
|
||||||
|
def add_tensor(self, name: str, tensor: np.ndarray, raw_shape: Optional[np.ndarray] = None, raw_dtype: Optional[GGMLQuantizationType] = None):
|
||||||
|
if self.use_temp_file and not hasattr(self, "temp_file"):
|
||||||
|
self.temp_file = tempfile.SpooledTemporaryFile(mode="w+b", max_size=256*1024*1024)
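# note: tensor bytes are buffered here (in RAM up to 256 MiB, then spilled to disk)
# and are only copied into the output file later, by write_tensors_to_file()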
|
||||||
|
self.temp_file.seek(0)
|
||||||
|
|
||||||
|
self.add_tensor_info(name, raw_shape if raw_shape is not None else tensor.shape, tensor.dtype, tensor.nbytes, raw_dtype = raw_dtype)
|
||||||
|
|
||||||
|
pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes
|
||||||
|
|
||||||
|
if not self.use_temp_file:
|
||||||
|
self.tensors.append((tensor, pad))
|
||||||
|
return
|
||||||
|
|
||||||
|
tensor.tofile(self.temp_file)
|
||||||
|
|
||||||
|
if pad != 0:
|
||||||
|
self.temp_file.write(bytes([0] * pad))
|
||||||
|
|
||||||
|
def write_tensor_data(self, tensor: np.ndarray):
|
||||||
|
pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
|
||||||
|
if pad != 0:
|
||||||
|
self.fout.write(bytes([0] * pad))
|
||||||
|
|
||||||
|
tensor.tofile(self.fout)
|
||||||
|
|
||||||
|
pad = GGUFWriter.ggml_pad(tensor.nbytes, self.data_alignment) - tensor.nbytes
|
||||||
|
if pad != 0:
|
||||||
|
self.fout.write(bytes([0] * pad))
|
||||||
|
|
||||||
|
def write_tensors_to_file(self):
|
||||||
|
self.write_ti_data_to_file()
|
||||||
|
|
||||||
|
pad = GGUFWriter.ggml_pad(self.fout.tell(), self.data_alignment) - self.fout.tell()
|
||||||
|
if pad != 0:
|
||||||
|
self.fout.write(bytes([0] * pad))
|
||||||
|
|
||||||
|
if not self.use_temp_file:
|
||||||
|
for (currtensor, currpad) in self.tensors:
|
||||||
|
currtensor.tofile(self.fout)
|
||||||
|
if currpad != 0:
|
||||||
|
self.fout.write(bytes([0] * currpad))
|
||||||
|
return
|
||||||
|
|
||||||
|
self.temp_file.seek(0)
|
||||||
|
|
||||||
|
shutil.copyfileobj(self.temp_file, self.fout)
|
||||||
|
self.flush()
|
||||||
|
self.temp_file.close()
|
||||||
|
|
||||||
|
def flush(self):
|
||||||
|
self.fout.flush()
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
self.fout.close()
|
||||||
|
|
||||||
|
def add_architecture(self):
|
||||||
|
self.add_string(KEY_GENERAL_ARCHITECTURE, self.arch)
|
||||||
|
|
||||||
|
def add_author(self, author: str):
|
||||||
|
self.add_string(KEY_GENERAL_AUTHOR, author)
|
||||||
|
|
||||||
|
def add_tensor_data_layout(self, layout: str):
|
||||||
|
self.add_string(KEY_LLM_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
|
||||||
|
|
||||||
|
def add_url(self, url: str):
|
||||||
|
self.add_string(KEY_GENERAL_URL, url)
|
||||||
|
|
||||||
|
def add_description(self, description: str):
|
||||||
|
self.add_string(KEY_GENERAL_DESCRIPTION, description)
|
||||||
|
|
||||||
|
def add_source_url(self, url: str):
|
||||||
|
self.add_string(KEY_GENERAL_SOURCE_URL, url)
|
||||||
|
|
||||||
|
def add_source_hf_repo(self, repo: str):
|
||||||
|
self.add_string(KEY_GENERAL_SOURCE_HF_REPO, repo)
|
||||||
|
|
||||||
|
def add_file_type(self, ftype: int):
|
||||||
|
self.add_uint32(KEY_GENERAL_FILE_TYPE, ftype)
|
||||||
|
|
||||||
|
def add_name(self, name: str):
|
||||||
|
self.add_string(KEY_GENERAL_NAME, name)
|
||||||
|
|
||||||
|
def add_quantization_version(self, quantization_version: GGMLQuantizationType):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_GENERAL_QUANTIZATION_VERSION, quantization_version)
|
||||||
|
|
||||||
|
def add_custom_alignment(self, alignment: int):
|
||||||
|
self.data_alignment = alignment
|
||||||
|
self.add_uint32(KEY_GENERAL_ALIGNMENT, alignment)
|
||||||
|
|
||||||
|
def add_context_length(self, length: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_LLM_CONTEXT_LENGTH.format(arch=self.arch), length)
|
||||||
|
|
||||||
|
def add_embedding_length(self, length: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_LLM_EMBEDDING_LENGTH.format(arch=self.arch), length)
|
||||||
|
|
||||||
|
def add_block_count(self, length: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_LLM_BLOCK_COUNT.format(arch=self.arch), length)
|
||||||
|
|
||||||
|
def add_feed_forward_length(self, length: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_LLM_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
|
||||||
|
|
||||||
|
def add_parallel_residual(self, use: bool):
|
||||||
|
self.add_bool(
|
||||||
|
KEY_LLM_USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)
|
||||||
|
|
||||||
|
def add_tensor_data_layout(self, layout: str):
|
||||||
|
self.add_string(
|
||||||
|
KEY_LLM_TENSOR_DATA_LAYOUT.format(arch=self.arch), layout)
|
||||||
|
|
||||||
|
def add_head_count(self, count: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_ATTENTION_HEAD_COUNT.format(arch=self.arch), count)
|
||||||
|
|
||||||
|
def add_head_count_kv(self, count: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_ATTENTION_HEAD_COUNT_KV.format(arch=self.arch), count)
|
||||||
|
|
||||||
|
def add_max_alibi_bias(self, bias: float):
|
||||||
|
self.add_float32(
|
||||||
|
KEY_ATTENTION_MAX_ALIBI_BIAS.format(arch=self.arch), bias)
|
||||||
|
|
||||||
|
def add_clamp_kqv(self, value: float):
|
||||||
|
self.add_float32(
|
||||||
|
KEY_ATTENTION_CLAMP_KQV.format(arch=self.arch), value)
|
||||||
|
|
||||||
|
def add_layer_norm_eps(self, value: float):
|
||||||
|
self.add_float32(
|
||||||
|
KEY_ATTENTION_LAYERNORM_EPS.format(arch=self.arch), value)
|
||||||
|
|
||||||
|
def add_layer_norm_rms_eps(self, value: float):
|
||||||
|
self.add_float32(
|
||||||
|
KEY_ATTENTION_LAYERNORM_RMS_EPS.format(arch=self.arch), value)
|
||||||
|
|
||||||
|
def add_rope_dimension_count(self, count: int):
|
||||||
|
self.add_uint32(
|
||||||
|
KEY_ROPE_DIMENSION_COUNT.format(arch=self.arch), count)
|
||||||
|
|
||||||
|
def add_rope_scale_linear(self, value: float):
|
||||||
|
self.add_float32(KEY_ROPE_SCALE_LINEAR.format(arch=self.arch), value)
|
||||||
|
|
||||||
|
def add_tokenizer_model(self, model: str):
|
||||||
|
self.add_string(KEY_TOKENIZER_MODEL, model)
|
||||||
|
|
||||||
|
def add_token_list(self, tokens: List):
|
||||||
|
self.add_array(KEY_TOKENIZER_LIST, tokens)
|
||||||
|
|
||||||
|
def add_token_merges(self, merges: List):
|
||||||
|
self.add_array(KEY_TOKENIZER_MERGES, merges)
|
||||||
|
|
||||||
|
def add_token_types(self, types: List[int]):
|
||||||
|
self.add_array(KEY_TOKENIZER_TOKEN_TYPE, types)
|
||||||
|
|
||||||
|
def add_token_scores(self, scores: List[float]):
|
||||||
|
self.add_array(KEY_TOKENIZER_SCORES, scores)
|
||||||
|
|
||||||
|
def add_bos_token_id(self, id: int):
|
||||||
|
self.add_uint32(KEY_TOKENIZER_BOS_ID, id)
|
||||||
|
|
||||||
|
def add_eos_token_id(self, id: int):
|
||||||
|
self.add_uint32(KEY_TOKENIZER_EOS_ID, id)
|
||||||
|
|
||||||
|
def add_unk_token_id(self, id: int):
|
||||||
|
self.add_uint32(KEY_TOKENIZER_UNK_ID, id)
|
||||||
|
|
||||||
|
def add_sep_token_id(self, id: int):
|
||||||
|
self.add_uint32(KEY_TOKENIZER_SEP_ID, id)
|
||||||
|
|
||||||
|
def add_pad_token_id(self, id: int):
|
||||||
|
self.add_uint32(KEY_TOKENIZER_PAD_ID, id)
|
||||||
|
|
||||||
|
|
||||||
|
# Example usage:
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# Example usage with a file
|
||||||
|
gguf_writer = GGUFWriter("example.gguf", "llama")
|
||||||
|
|
||||||
|
gguf_writer.add_architecture()
|
||||||
|
gguf_writer.add_block_count(12)
|
||||||
|
gguf_writer.add_uint32("answer", 42) # Write a 32-bit integer
|
||||||
|
gguf_writer.add_float32("answer_in_float", 42.0) # Write a 32-bit float
|
||||||
|
gguf_writer.add_custom_alignment(64)
|
||||||
|
|
||||||
|
tensor1 = np.ones((32,), dtype=np.float32) * 100.0
|
||||||
|
tensor2 = np.ones((64,), dtype=np.float32) * 101.0
|
||||||
|
tensor3 = np.ones((96,), dtype=np.float32) * 102.0
|
||||||
|
|
||||||
|
gguf_writer.add_tensor("tensor1", tensor1)
|
||||||
|
gguf_writer.add_tensor("tensor2", tensor2)
|
||||||
|
gguf_writer.add_tensor("tensor3", tensor3)
|
||||||
|
|
||||||
|
gguf_writer.write_header_to_file()
|
||||||
|
gguf_writer.write_kv_data_to_file()
|
||||||
|
gguf_writer.write_tensors_to_file()
|
||||||
|
|
||||||
|
gguf_writer.close()
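
# Editor's sketch (not part of the original script): the fixed-size header written by
# write_header_to_file() can be read back with struct to sanity-check an output file.
# Layout: uint32 magic, uint32 version, uint32 n_tensors, uint32 n_kv, all little-endian.
def read_gguf_header(path: str):
    with open(path, "rb") as f:
        magic, version, n_tensors, n_kv = struct.unpack("<IIII", f.read(16))
    assert magic == GGUF_MAGIC, "not a GGUF file"
    return version, n_tensors, n_kv

# usage: version, n_tensors, n_kv = read_gguf_header("example.gguf")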
|
91
grammars/README.md
Normal file
@@ -0,0 +1,91 @@
# GBNF Guide

GBNF (GGML BNF) is a format for defining [formal grammars](https://en.wikipedia.org/wiki/Formal_grammar) to constrain model outputs in `llama.cpp`. For example, you can use it to force the model to generate valid JSON, or speak only in emojis. GBNF grammars are supported in various ways in `examples/main` and `examples/server`.

## Background

[Backus-Naur Form (BNF)](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_form) is a notation for describing the syntax of formal languages like programming languages, file formats, and protocols. GBNF is an extension of BNF that primarily adds a few modern regex-like features.

## Basics

In GBNF, we define *production rules* that specify how a *non-terminal* (rule name) can be replaced with sequences of *terminals* (characters, specifically Unicode [code points](https://en.wikipedia.org/wiki/Code_point)) and other non-terminals. The basic format of a production rule is `nonterminal ::= sequence...`.

## Example

Before going deeper, let's look at some of the features demonstrated in `grammars/chess.gbnf`, a small chess notation grammar:
```
# `root` specifies the pattern for the overall output
root ::= (
    # it must start with the characters "1. " followed by a sequence
    # of characters that match the `move` rule, followed by a space, followed
    # by another move, and then a newline
    "1. " move " " move "\n"

    # it's followed by one or more subsequent moves, numbered with one or two digits
    ([1-9] [0-9]? ". " move " " move "\n")+
)

# `move` is an abstract representation, which can be a pawn, nonpawn, or castle.
# The `[+#]?` denotes the possibility of checking or mate signs after moves
move ::= (pawn | nonpawn | castle) [+#]?

pawn ::= ...
nonpawn ::= ...
castle ::= ...
```

## Non-Terminals and Terminals

Non-terminal symbols (rule names) stand for a pattern of terminals and other non-terminals. They are required to be a dashed lowercase word, like `move`, `castle`, or `check-mate`.

Terminals are actual characters ([code points](https://en.wikipedia.org/wiki/Code_point)). They can be specified as a sequence like `"1"` or `"O-O"` or as ranges like `[1-9]` or `[NBKQR]`.

## Characters and character ranges

Terminals support the full range of Unicode. Unicode characters can be specified directly in the grammar, for example `hiragana ::= [ぁ-ゟ]`, or with escapes: 8-bit (`\xXX`), 16-bit (`\uXXXX`) or 32-bit (`\UXXXXXXXX`).

Character ranges can be negated with `^`:
```
single-line ::= [^\n]+ "\n"
```

## Sequences and Alternatives

The order of symbols in a sequence matters. For example, in `"1. " move " " move "\n"`, the `"1. "` must come before the first `move`, etc.

Alternatives, denoted by `|`, give different sequences that are acceptable. For example, in `move ::= pawn | nonpawn | castle`, `move` can be a `pawn` move, a `nonpawn` move, or a `castle`.

Parentheses `()` can be used to group sequences, which allows for embedding alternatives in a larger rule or applying repetition and optional symbols (below) to a sequence.

## Repetition and Optional Symbols

- `*` after a symbol or sequence means that it can be repeated zero or more times.
- `+` denotes that the symbol or sequence should appear one or more times.
- `?` makes the preceding symbol or sequence optional.

## Comments and newlines

Comments can be specified with `#`:
```
# defines optional whitespace
ws ::= [ \t\n]+
```

Newlines are allowed between rules and between symbols or sequences nested inside parentheses. Additionally, a newline after an alternate marker `|` will continue the current rule, even outside of parentheses.

## The root rule

In a full grammar, the `root` rule always defines the starting point of the grammar. In other words, it specifies what the entire output must match.

```
# a grammar for lists
root ::= ("- " item)+
item ::= [^\n]+ "\n"
```

## Next steps

This guide provides a brief overview. Check out the GBNF files in this directory (`grammars/`) for examples of full grammars. You can try them out with:
```
./main -m <model> --grammar-file grammars/some-grammar.gbnf -p 'Some prompt'
```
164
k_quants.c
@@ -77,6 +77,11 @@ static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t *
|
|||||||
}
|
}
|
||||||
return 1/iscale;
|
return 1/iscale;
|
||||||
}
|
}
|
||||||
|
bool return_early = false;
|
||||||
|
if (rmse_type < 0) {
|
||||||
|
rmse_type = -rmse_type;
|
||||||
|
return_early = true;
|
||||||
|
}
|
||||||
int weight_type = rmse_type%2;
|
int weight_type = rmse_type%2;
|
||||||
float sumlx = 0;
|
float sumlx = 0;
|
||||||
float suml2 = 0;
|
float suml2 = 0;
|
||||||
@@ -89,56 +94,9 @@ static float make_qx_quants(int n, int nmax, const float * restrict x, int8_t *
|
|||||||
suml2 += w*l*l;
|
suml2 += w*l*l;
|
||||||
}
|
}
|
||||||
float scale = sumlx/suml2;
|
float scale = sumlx/suml2;
|
||||||
|
if (return_early) return suml2 > 0 ? 0.5f*(scale + 1/iscale) : 1/iscale;
|
||||||
float best = scale * sumlx;
|
float best = scale * sumlx;
|
||||||
for (int itry = 0; itry < 3; ++itry) {
|
for (int is = -9; is <= 9; ++is) {
|
||||||
iscale = 1/scale;
|
|
||||||
float slx = 0;
|
|
||||||
float sl2 = 0;
|
|
||||||
bool changed = false;
|
|
||||||
for (int i = 0; i < n; ++i) {
|
|
||||||
int l = nearest_int(iscale * x[i]);
|
|
||||||
l = MAX(-nmax, MIN(nmax-1, l));
|
|
||||||
if (l + nmax != L[i]) { changed = true; }
|
|
||||||
float w = weight_type == 1 ? x[i] * x[i] : 1.f;
|
|
||||||
slx += w*x[i]*l;
|
|
||||||
sl2 += w*l*l;
|
|
||||||
}
|
|
||||||
if (!changed || sl2 == 0 || slx*slx <= best*sl2) { break; }
|
|
||||||
for (int i = 0; i < n; ++i) {
|
|
||||||
int l = nearest_int(iscale * x[i]);
|
|
||||||
L[i] = nmax + MAX(-nmax, MIN(nmax-1, l));
|
|
||||||
}
|
|
||||||
sumlx = slx; suml2 = sl2;
|
|
||||||
scale = sumlx/suml2;
|
|
||||||
best = scale * sumlx;
|
|
||||||
}
|
|
||||||
for (int itry = 0; itry < 5; ++itry) {
|
|
||||||
int n_changed = 0;
|
|
||||||
for (int i = 0; i < n; ++i) {
|
|
||||||
float w = weight_type == 1 ? x[i]*x[i] : 1;
|
|
||||||
int l = L[i] - nmax;
|
|
||||||
float slx = sumlx - w*x[i]*l;
|
|
||||||
if (slx > 0) {
|
|
||||||
float sl2 = suml2 - w*l*l;
|
|
||||||
int new_l = nearest_int(x[i] * sl2 / slx);
|
|
||||||
new_l = MAX(-nmax, MIN(nmax-1, new_l));
|
|
||||||
if (new_l != l) {
|
|
||||||
slx += w*x[i]*new_l;
|
|
||||||
sl2 += w*new_l*new_l;
|
|
||||||
if (sl2 > 0 && slx*slx*suml2 > sumlx*sumlx*sl2) {
|
|
||||||
L[i] = nmax + new_l; sumlx = slx; suml2 = sl2;
|
|
||||||
scale = sumlx / suml2; best = scale * sumlx;
|
|
||||||
++n_changed;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!n_changed) { break; }
|
|
||||||
}
|
|
||||||
if (rmse_type < 3) {
|
|
||||||
return scale;
|
|
||||||
}
|
|
||||||
for (int is = -4; is <= 4; ++is) {
|
|
||||||
if (is == 0) {
|
if (is == 0) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@@ -221,12 +179,17 @@ static float make_q3_quants(int n, int nmax, const float * restrict x, int8_t *
|
|||||||
return 1/iscale;
|
return 1/iscale;
|
||||||
}
|
}
|
||||||
|
|
||||||
static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min, int ntry) {
|
static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t * restrict L, float * restrict the_min,
|
||||||
|
int ntry, float alpha) {
|
||||||
float min = x[0];
|
float min = x[0];
|
||||||
float max = x[0];
|
float max = x[0];
|
||||||
|
float sum_x = 0;
|
||||||
|
float sum_x2 = 0;
|
||||||
for (int i = 1; i < n; ++i) {
|
for (int i = 1; i < n; ++i) {
|
||||||
if (x[i] < min) min = x[i];
|
if (x[i] < min) min = x[i];
|
||||||
if (x[i] > max) max = x[i];
|
if (x[i] > max) max = x[i];
|
||||||
|
sum_x += x[i];
|
||||||
|
sum_x2 += x[i]*x[i];
|
||||||
}
|
}
|
||||||
if (max == min) {
|
if (max == min) {
|
||||||
for (int i = 0; i < n; ++i) L[i] = 0;
|
for (int i = 0; i < n; ++i) L[i] = 0;
|
||||||
@@ -254,7 +217,7 @@ static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t
|
|||||||
for (int i = 0; i < n; ++i) {
|
for (int i = 0; i < n; ++i) {
|
||||||
sum += x[i] - scale*L[i];
|
sum += x[i] - scale*L[i];
|
||||||
}
|
}
|
||||||
min = sum/n;
|
min = alpha*min + (1 - alpha)*sum/n;
|
||||||
if (min > 0) min = 0;
|
if (min > 0) min = 0;
|
||||||
iscale = 1/scale;
|
iscale = 1/scale;
|
||||||
if (!did_change) break;
|
if (!did_change) break;
|
||||||
@@ -263,6 +226,82 @@ static float make_qkx1_quants(int n, int nmax, const float * restrict x, uint8_t
|
|||||||
return scale;
|
return scale;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static float make_qkx2_quants(int n, int nmax, const float * restrict x, const float * restrict weights,
|
||||||
|
uint8_t * restrict L, float * restrict the_min, uint8_t * restrict Laux,
|
||||||
|
float rmin, float rdelta, int nstep, bool use_mad) {
|
||||||
|
float min = x[0];
|
||||||
|
float max = x[0];
|
||||||
|
float sum_w = weights[0];
|
||||||
|
float sum_x = sum_w * x[0];
|
||||||
|
for (int i = 1; i < n; ++i) {
|
||||||
|
if (x[i] < min) min = x[i];
|
||||||
|
if (x[i] > max) max = x[i];
|
||||||
|
float w = weights[i];
|
||||||
|
sum_w += w;
|
||||||
|
sum_x += w * x[i];
|
||||||
|
}
|
||||||
|
if (min > 0) min = 0;
|
||||||
|
if (max == min) {
|
||||||
|
for (int i = 0; i < n; ++i) L[i] = 0;
|
||||||
|
*the_min = -min;
|
||||||
|
return 0.f;
|
||||||
|
}
|
||||||
|
float iscale = nmax/(max - min);
|
||||||
|
float scale = 1/iscale;
|
||||||
|
float best_mad = 0;
|
||||||
|
for (int i = 0; i < n; ++i) {
|
||||||
|
int l = nearest_int(iscale*(x[i] - min));
|
||||||
|
L[i] = MAX(0, MIN(nmax, l));
|
||||||
|
float diff = scale * L[i] + min - x[i];
|
||||||
|
diff = use_mad ? fabsf(diff) : diff * diff;
|
||||||
|
float w = weights[i];
|
||||||
|
best_mad += w * diff;
|
||||||
|
}
|
||||||
|
if (nstep < 1) {
|
||||||
|
*the_min = -min;
|
||||||
|
return scale;
|
||||||
|
}
|
||||||
|
for (int is = 0; is <= nstep; ++is) {
|
||||||
|
iscale = (rmin + rdelta*is + nmax)/(max - min);
|
||||||
|
float sum_l = 0, sum_l2 = 0, sum_xl = 0;
|
||||||
|
for (int i = 0; i < n; ++i) {
|
||||||
|
int l = nearest_int(iscale*(x[i] - min));
|
||||||
|
l = MAX(0, MIN(nmax, l));
|
||||||
|
Laux[i] = l;
|
||||||
|
float w = weights[i];
|
||||||
|
sum_l += w*l;
|
||||||
|
sum_l2 += w*l*l;
|
||||||
|
sum_xl += w*l*x[i];
|
||||||
|
}
|
||||||
|
float D = sum_w * sum_l2 - sum_l * sum_l;
|
||||||
|
if (D > 0) {
|
||||||
|
float this_scale = (sum_w * sum_xl - sum_x * sum_l)/D;
|
||||||
|
float this_min = (sum_l2 * sum_x - sum_l * sum_xl)/D;
|
||||||
|
if (this_min > 0) {
|
||||||
|
this_min = 0;
|
||||||
|
this_scale = sum_xl / sum_l2;
|
||||||
|
}
|
||||||
|
float mad = 0;
|
||||||
|
for (int i = 0; i < n; ++i) {
|
||||||
|
float diff = this_scale * Laux[i] + this_min - x[i];
|
||||||
|
diff = use_mad ? fabsf(diff) : diff * diff;
|
||||||
|
float w = weights[i];
|
||||||
|
mad += w * diff;
|
||||||
|
}
|
||||||
|
if (mad < best_mad) {
|
||||||
|
for (int i = 0; i < n; ++i) {
|
||||||
|
L[i] = Laux[i];
|
||||||
|
}
|
||||||
|
best_mad = mad;
|
||||||
|
scale = this_scale;
|
||||||
|
min = this_min;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*the_min = -min;
|
||||||
|
return scale;
|
||||||
|
}
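
For reference, the scale/min pair selected inside the search loop above is the closed-form weighted least-squares fit of x[i] ≈ scale*l[i] + min. A small NumPy sanity check of those formulas (illustrative only, not part of this diff; the variable names mirror the C code):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=32)                          # values to quantize
l = rng.integers(0, 16, size=32).astype(float)   # candidate quant levels
w = np.abs(x) + 1.0                              # per-value weights

sum_w, sum_x = w.sum(), (w * x).sum()
sum_l, sum_l2, sum_xl = (w * l).sum(), (w * l * l).sum(), (w * l * x).sum()

D     = sum_w * sum_l2 - sum_l * sum_l
scale = (sum_w * sum_xl - sum_x * sum_l) / D     # this_scale in the C code
mn    = (sum_l2 * sum_x - sum_l * sum_xl) / D    # this_min in the C code

# compare with a generic weighted least-squares solve of x ~ scale*l + mn
A = np.stack([l, np.ones_like(l)], axis=1)
coef, *_ = np.linalg.lstsq(A * np.sqrt(w)[:, None], x * np.sqrt(w), rcond=None)
assert np.allclose([scale, mn], coef)
```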
|
||||||
|
|
||||||
#if QK_K == 256
|
#if QK_K == 256
|
||||||
static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
|
static inline void get_scale_min_k4(int j, const uint8_t * restrict q, uint8_t * restrict d, uint8_t * restrict m) {
|
||||||
if (j < 4) {
|
if (j < 4) {
|
||||||
@@ -281,6 +320,8 @@ void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict
|
|||||||
const int nb = k / QK_K;
|
const int nb = k / QK_K;
|
||||||
|
|
||||||
uint8_t L[QK_K];
|
uint8_t L[QK_K];
|
||||||
|
uint8_t Laux[16];
|
||||||
|
float weights[16];
|
||||||
float mins[QK_K/16];
|
float mins[QK_K/16];
|
||||||
float scales[QK_K/16];
|
float scales[QK_K/16];
|
||||||
|
|
||||||
@@ -291,7 +332,8 @@ void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict
|
|||||||
float max_scale = 0; // as we are deducting the min, scales are always positive
|
float max_scale = 0; // as we are deducting the min, scales are always positive
|
||||||
float max_min = 0;
|
float max_min = 0;
|
||||||
for (int j = 0; j < QK_K/16; ++j) {
|
for (int j = 0; j < QK_K/16; ++j) {
|
||||||
scales[j] = make_qkx1_quants(16, 3, x + 16*j, L + 16*j, &mins[j], 5);
|
for (int l = 0; l < 16; ++l) weights[l] = fabsf(x[16*j + l]);
|
||||||
|
scales[j] = make_qkx2_quants(16, 3, x + 16*j, weights, L + 16*j, &mins[j], Laux, -0.5f, 0.1f, 15, true);
|
||||||
float scale = scales[j];
|
float scale = scales[j];
|
||||||
if (scale > max_scale) {
|
if (scale > max_scale) {
|
||||||
max_scale = scale;
|
max_scale = scale;
|
||||||
@@ -637,6 +679,8 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict
|
|||||||
const int nb = k / QK_K;
|
const int nb = k / QK_K;
|
||||||
|
|
||||||
uint8_t L[QK_K];
|
uint8_t L[QK_K];
|
||||||
|
uint8_t Laux[32];
|
||||||
|
float weights[32];
|
||||||
float mins[QK_K/32];
|
float mins[QK_K/32];
|
||||||
float scales[QK_K/32];
|
float scales[QK_K/32];
|
||||||
|
|
||||||
@@ -645,7 +689,12 @@ void quantize_row_q4_K_reference(const float * restrict x, block_q4_K * restrict
|
|||||||
float max_scale = 0; // as we are deducting the min, scales are always positive
|
float max_scale = 0; // as we are deducting the min, scales are always positive
|
||||||
float max_min = 0;
|
float max_min = 0;
|
||||||
for (int j = 0; j < QK_K/32; ++j) {
|
for (int j = 0; j < QK_K/32; ++j) {
|
||||||
scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 5);
|
//scales[j] = make_qkx1_quants(32, 15, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
|
||||||
|
float sum_x2 = 0;
|
||||||
|
for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
|
||||||
|
float av_x = sqrtf(sum_x2/32);
|
||||||
|
for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
|
||||||
|
scales[j] = make_qkx2_quants(32, 15, x + 32*j, weights, L + 32*j, &mins[j], Laux, -1.f, 0.1f, 20, false);
|
||||||
float scale = scales[j];
|
float scale = scales[j];
|
||||||
if (scale > max_scale) {
|
if (scale > max_scale) {
|
||||||
max_scale = scale;
|
max_scale = scale;
|
||||||
@@ -798,6 +847,8 @@ void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict
|
|||||||
uint8_t L[QK_K];
|
uint8_t L[QK_K];
|
||||||
float mins[QK_K/32];
|
float mins[QK_K/32];
|
||||||
float scales[QK_K/32];
|
float scales[QK_K/32];
|
||||||
|
float weights[32];
|
||||||
|
uint8_t Laux[32];
|
||||||
#else
|
#else
|
||||||
int8_t L[QK_K];
|
int8_t L[QK_K];
|
||||||
float scales[QK_K/16];
|
float scales[QK_K/16];
|
||||||
@@ -810,7 +861,12 @@ void quantize_row_q5_K_reference(const float * restrict x, block_q5_K * restrict
|
|||||||
float max_scale = 0; // as we are deducting the min, scales are always positive
|
float max_scale = 0; // as we are deducting the min, scales are always positive
|
||||||
float max_min = 0;
|
float max_min = 0;
|
||||||
for (int j = 0; j < QK_K/32; ++j) {
|
for (int j = 0; j < QK_K/32; ++j) {
|
||||||
scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 5);
|
//scales[j] = make_qkx1_quants(32, 31, x + 32*j, L + 32*j, &mins[j], 9, 0.5f);
|
||||||
|
float sum_x2 = 0;
|
||||||
|
for (int l = 0; l < 32; ++l) sum_x2 += x[32*j + l] * x[32*j + l];
|
||||||
|
float av_x = sqrtf(sum_x2/32);
|
||||||
|
for (int l = 0; l < 32; ++l) weights[l] = av_x + fabsf(x[32*j + l]);
|
||||||
|
scales[j] = make_qkx2_quants(32, 31, x + 32*j, weights, L + 32*j, &mins[j], Laux, -0.5f, 0.1f, 15, false);
|
||||||
float scale = scales[j];
|
float scale = scales[j];
|
||||||
if (scale > max_scale) {
|
if (scale > max_scale) {
|
||||||
max_scale = scale;
|
max_scale = scale;
|
||||||
|
553
llama-util.h
@@ -1,553 +0,0 @@
|
|||||||
// Internal header to be included only by llama.cpp.
|
|
||||||
// Contains wrappers around OS interfaces.
|
|
||||||
|
|
||||||
#ifndef LLAMA_UTIL_H
|
|
||||||
#define LLAMA_UTIL_H
|
|
||||||
|
|
||||||
#include <cstdio>
|
|
||||||
#include <cstdint>
|
|
||||||
#include <cerrno>
|
|
||||||
#include <cstring>
|
|
||||||
#include <cstdarg>
|
|
||||||
#include <cstdlib>
|
|
||||||
#include <climits>
|
|
||||||
|
|
||||||
#include <string>
|
|
||||||
#include <vector>
|
|
||||||
#include <stdexcept>
|
|
||||||
|
|
||||||
#ifdef __has_include
|
|
||||||
#if __has_include(<unistd.h>)
|
|
||||||
#include <unistd.h>
|
|
||||||
#if defined(_POSIX_MAPPED_FILES)
|
|
||||||
#include <sys/mman.h>
|
|
||||||
#endif
|
|
||||||
#if defined(_POSIX_MEMLOCK_RANGE)
|
|
||||||
#include <sys/resource.h>
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(_WIN32)
|
|
||||||
#define WIN32_LEAN_AND_MEAN
|
|
||||||
#ifndef NOMINMAX
|
|
||||||
#define NOMINMAX
|
|
||||||
#endif
|
|
||||||
#include <windows.h>
|
|
||||||
#include <io.h>
|
|
||||||
#include <stdio.h> // for _fseeki64
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define LLAMA_ASSERT(x) \
|
|
||||||
do { \
|
|
||||||
if (!(x)) { \
|
|
||||||
fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
|
|
||||||
abort(); \
|
|
||||||
} \
|
|
||||||
} while (0)
|
|
||||||
|
|
||||||
#ifdef __GNUC__
|
|
||||||
#ifdef __MINGW32__
|
|
||||||
__attribute__((format(gnu_printf, 1, 2)))
|
|
||||||
#else
|
|
||||||
__attribute__((format(printf, 1, 2)))
|
|
||||||
#endif
|
|
||||||
#endif
|
|
||||||
static std::string format(const char * fmt, ...) {
|
|
||||||
va_list ap, ap2;
|
|
||||||
va_start(ap, fmt);
|
|
||||||
va_copy(ap2, ap);
|
|
||||||
int size = vsnprintf(NULL, 0, fmt, ap);
|
|
||||||
LLAMA_ASSERT(size >= 0 && size < INT_MAX);
|
|
||||||
std::vector<char> buf(size + 1);
|
|
||||||
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
|
||||||
LLAMA_ASSERT(size2 == size);
|
|
||||||
va_end(ap2);
|
|
||||||
va_end(ap);
|
|
||||||
return std::string(buf.data(), size);
|
|
||||||
}
|
|
||||||
|
|
||||||
struct llama_file {
|
|
||||||
// use FILE * so we don't have to re-open the file to mmap
|
|
||||||
FILE * fp;
|
|
||||||
size_t size;
|
|
||||||
|
|
||||||
llama_file(const char * fname, const char * mode) {
|
|
||||||
fp = std::fopen(fname, mode);
|
|
||||||
if (fp == NULL) {
|
|
||||||
throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
|
|
||||||
}
|
|
||||||
seek(0, SEEK_END);
|
|
||||||
size = tell();
|
|
||||||
seek(0, SEEK_SET);
|
|
||||||
}
|
|
||||||
|
|
||||||
size_t tell() const {
|
|
||||||
#ifdef _WIN32
|
|
||||||
__int64 ret = _ftelli64(fp);
|
|
||||||
#else
|
|
||||||
long ret = std::ftell(fp);
|
|
||||||
#endif
|
|
||||||
LLAMA_ASSERT(ret != -1); // this really shouldn't fail
|
|
||||||
return (size_t) ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
void seek(size_t offset, int whence) {
|
|
||||||
#ifdef _WIN32
|
|
||||||
int ret = _fseeki64(fp, (__int64) offset, whence);
|
|
||||||
#else
|
|
||||||
int ret = std::fseek(fp, (long) offset, whence);
|
|
||||||
#endif
|
|
||||||
LLAMA_ASSERT(ret == 0); // same
|
|
||||||
}
|
|
||||||
|
|
||||||
void read_raw(void * ptr, size_t len) const {
|
|
||||||
if (len == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
errno = 0;
|
|
||||||
std::size_t ret = std::fread(ptr, len, 1, fp);
|
|
||||||
if (ferror(fp)) {
|
|
||||||
throw std::runtime_error(format("read error: %s", strerror(errno)));
|
|
||||||
}
|
|
||||||
if (ret != 1) {
|
|
||||||
throw std::runtime_error(std::string("unexpectedly reached end of file"));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
std::uint32_t read_u32() {
|
|
||||||
std::uint32_t ret;
|
|
||||||
read_raw(&ret, sizeof(ret));
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
std::string read_string(std::uint32_t len) {
|
|
||||||
std::vector<char> chars(len);
|
|
||||||
read_raw(chars.data(), len);
|
|
||||||
return std::string(chars.data(), len);
|
|
||||||
}
|
|
||||||
|
|
||||||
void write_raw(const void * ptr, size_t len) const {
|
|
||||||
if (len == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
errno = 0;
|
|
||||||
size_t ret = std::fwrite(ptr, len, 1, fp);
|
|
||||||
if (ret != 1) {
|
|
||||||
throw std::runtime_error(format("write error: %s", strerror(errno)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void write_u32(std::uint32_t val) {
|
|
||||||
write_raw(&val, sizeof(val));
|
|
||||||
}
|
|
||||||
|
|
||||||
~llama_file() {
|
|
||||||
        if (fp) {
            std::fclose(fp);
        }
    }
};

// llama_context_data
struct llama_data_context {
    virtual void write(const void * src, size_t size) = 0;
    virtual size_t get_size_written() = 0;
    virtual ~llama_data_context() = default;
};

struct llama_data_buffer_context : llama_data_context {
    uint8_t * ptr;
    size_t size_written = 0;

    llama_data_buffer_context(uint8_t * p) : ptr(p) {}

    void write(const void * src, size_t size) override {
        memcpy(ptr, src, size);
        ptr += size;
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

struct llama_data_file_context : llama_data_context {
    llama_file * file;
    size_t size_written = 0;

    llama_data_file_context(llama_file * f) : file(f) {}

    void write(const void * src, size_t size) override {
        file->write_raw(src, size);
        size_written += size;
    }

    size_t get_size_written() override {
        return size_written;
    }
};

#if defined(_WIN32)
static std::string llama_format_win_err(DWORD err) {
    LPSTR buf;
    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
    if (!size) {
        return "FormatMessageA failed";
    }
    std::string ret(buf, size);
    LocalFree(buf);
    return ret;
}
#endif

struct llama_mmap {
    void * addr;
    size_t size;

    llama_mmap(const llama_mmap &) = delete;

#ifdef _POSIX_MAPPED_FILES
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, size_t prefetch = (size_t) -1 /* -1 = max value */, bool numa = false) {
        size = file->size;
        int fd = fileno(file->fp);
        int flags = MAP_SHARED;
        // prefetch/readahead impairs performance on NUMA systems
        if (numa) { prefetch = 0; }
#ifdef __linux__
        if (prefetch >= file->size) { flags |= MAP_POPULATE; }
#endif
        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
        if (addr == MAP_FAILED) {
            throw std::runtime_error(format("mmap failed: %s", strerror(errno)));
        }

        if (prefetch > 0) {
            // Advise the kernel to preload the mapped memory
            if (madvise(addr, std::min(file->size, prefetch), MADV_WILLNEED)) {
                fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
                        strerror(errno));
            }
        }
        if (numa) {
            // advise the kernel not to use readahead
            // (because the next page might not belong to the same node)
            if (madvise(addr, file->size, MADV_RANDOM)) {
                fprintf(stderr, "warning: madvise(.., MADV_RANDOM) failed: %s\n",
                        strerror(errno));
            }
        }
    }

    ~llama_mmap() {
        munmap(addr, size);
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    llama_mmap(struct llama_file * file, bool prefetch = true, bool numa = false) {
        (void) numa;

        size = file->size;

        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));

        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
        DWORD error = GetLastError();

        if (hMapping == NULL) {
            throw std::runtime_error(format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str()));
        }

        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
        error = GetLastError();
        CloseHandle(hMapping);

        if (addr == NULL) {
            throw std::runtime_error(format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str()));
        }

        if (prefetch) {
            // The PrefetchVirtualMemory API is only present on Windows 8 and above, so we
            // will dynamically load it using GetProcAddress.
            BOOL (WINAPI *pPrefetchVirtualMemory) (HANDLE, ULONG_PTR, PWIN32_MEMORY_RANGE_ENTRY, ULONG);
            HMODULE hKernel32;

            // This call is guaranteed to succeed.
            hKernel32 = GetModuleHandleW(L"kernel32.dll");

            // This call may fail if on a pre-Win8 system.
            pPrefetchVirtualMemory = reinterpret_cast<decltype(pPrefetchVirtualMemory)> (GetProcAddress(hKernel32, "PrefetchVirtualMemory"));

            if (pPrefetchVirtualMemory) {
                // Advise the kernel to preload the mapped memory.
                WIN32_MEMORY_RANGE_ENTRY range;
                range.VirtualAddress = addr;
                range.NumberOfBytes = (SIZE_T)size;
                if (!pPrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
                    fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
                            llama_format_win_err(GetLastError()).c_str());
                }
            }
        }
    }

    ~llama_mmap() {
        if (!UnmapViewOfFile(addr)) {
            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    llama_mmap(struct llama_file *, bool prefetch = true, bool numa = false) {
        (void) prefetch;
        (void) numa;

        throw std::runtime_error(std::string("mmap not supported"));
    }
#endif
};

// Represents some region of memory being locked using mlock or VirtualLock;
// will automatically unlock on destruction.
struct llama_mlock {
    void * addr = NULL;
    size_t size = 0;
    bool failed_already = false;

    llama_mlock() {}
    llama_mlock(const llama_mlock &) = delete;

    ~llama_mlock() {
        if (size) {
            raw_unlock(addr, size);
        }
    }

    void init(void * ptr) {
        LLAMA_ASSERT(addr == NULL && size == 0);
        addr = ptr;
    }

    void grow_to(size_t target_size) {
        LLAMA_ASSERT(addr);
        if (failed_already) {
            return;
        }
        size_t granularity = lock_granularity();
        target_size = (target_size + granularity - 1) & ~(granularity - 1);
        if (target_size > size) {
            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
                size = target_size;
            } else {
                failed_already = true;
            }
        }
    }

#ifdef _POSIX_MEMLOCK_RANGE
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        return (size_t) sysconf(_SC_PAGESIZE);
    }

    #ifdef __APPLE__
        #define MLOCK_SUGGESTION \
            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
            "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
    #else
        #define MLOCK_SUGGESTION \
            "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
    #endif

    bool raw_lock(const void * addr, size_t size) {
        if (!mlock(addr, size)) {
            return true;
        } else {
            char * errmsg = std::strerror(errno);
            bool suggest = (errno == ENOMEM);

            // Check if the resource limit is fine after all
            struct rlimit lock_limit;
            if (suggest && getrlimit(RLIMIT_MEMLOCK, &lock_limit))
                suggest = false;
            if (suggest && (lock_limit.rlim_max > lock_limit.rlim_cur + size))
                suggest = false;

            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n%s",
                    size, this->size, errmsg, suggest ? MLOCK_SUGGESTION : "");
            return false;
        }
    }

    #undef MLOCK_SUGGESTION

    void raw_unlock(void * addr, size_t size) {
        if (munlock(addr, size)) {
            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
        }
    }
#elif defined(_WIN32)
    static constexpr bool SUPPORTED = true;

    size_t lock_granularity() {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        return (size_t) si.dwPageSize;
    }

    bool raw_lock(void * ptr, size_t len) {
        for (int tries = 1; ; tries++) {
            if (VirtualLock(ptr, len)) {
                return true;
            }
            if (tries == 2) {
                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
                        len, size, llama_format_win_err(GetLastError()).c_str());
                return false;
            }

            // It failed but this was only the first try; increase the working
            // set size and try again.
            SIZE_T min_ws_size, max_ws_size;
            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
            // Per MSDN: "The maximum number of pages that a process can lock
            // is equal to the number of pages in its minimum working set minus
            // a small overhead."
            // Hopefully a megabyte is enough overhead:
            size_t increment = len + 1048576;
            // The minimum must be <= the maximum, so we need to increase both:
            min_ws_size += increment;
            max_ws_size += increment;
            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
                        llama_format_win_err(GetLastError()).c_str());
                return false;
            }
        }
    }

    void raw_unlock(void * ptr, size_t len) {
        if (!VirtualUnlock(ptr, len)) {
            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
                    llama_format_win_err(GetLastError()).c_str());
        }
    }
#else
    static constexpr bool SUPPORTED = false;

    size_t lock_granularity() {
        return (size_t) 65536;
    }

    bool raw_lock(const void * addr, size_t len) {
        fprintf(stderr, "warning: mlock not supported on this system\n");
        return false;
    }

    void raw_unlock(const void * addr, size_t len) {}
#endif
};

// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
struct llama_buffer {
    uint8_t * addr = NULL;
    size_t size = 0;

    llama_buffer() = default;

    void resize(size_t len) {
#ifdef GGML_USE_METAL
        free(addr);
        int result = posix_memalign((void **) &addr, getpagesize(), len);
        if (result == 0) {
            memset(addr, 0, len);
        }
        else {
            addr = NULL;
        }
#else
        delete[] addr;
        addr = new uint8_t[len];
#endif
        size = len;
    }

    ~llama_buffer() {
#ifdef GGML_USE_METAL
        free(addr);
#else
        delete[] addr;
#endif
        addr = NULL;
    }

    // disable copy and move
    llama_buffer(const llama_buffer&) = delete;
    llama_buffer(llama_buffer&&) = delete;
    llama_buffer& operator=(const llama_buffer&) = delete;
    llama_buffer& operator=(llama_buffer&&) = delete;
};

#ifdef GGML_USE_CUBLAS
#include "ggml-cuda.h"
struct llama_ctx_buffer {
    uint8_t * addr = NULL;
    bool is_cuda;
    size_t size = 0;

    llama_ctx_buffer() = default;

    void resize(size_t size) {
        free();

        addr = (uint8_t *) ggml_cuda_host_malloc(size);
        if (addr) {
            is_cuda = true;
        }
        else {
            // fall back to pageable memory
            addr = new uint8_t[size];
            is_cuda = false;
        }
        this->size = size;
    }

    void free() {
        if (addr) {
            if (is_cuda) {
                ggml_cuda_host_free(addr);
            }
            else {
                delete[] addr;
            }
        }
        addr = NULL;
    }

    ~llama_ctx_buffer() {
        free();
    }

    // disable copy and move
    llama_ctx_buffer(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer(llama_ctx_buffer&&) = delete;
    llama_ctx_buffer& operator=(const llama_ctx_buffer&) = delete;
    llama_ctx_buffer& operator=(llama_ctx_buffer&&) = delete;
};
#else
typedef llama_buffer llama_ctx_buffer;
#endif

#endif
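Aside, not part of the commit: the llama_data_context interface above lets the same serialization code target either an in-memory buffer (llama_data_buffer_context) or a llama_file (llama_data_file_context). A minimal sketch of the write/get_size_written contract; the helper name is purely hypothetical:

    // hypothetical illustration only, not code from this commit
    static void write_session_magic(llama_data_context & data_ctx) {
        const uint32_t magic   = 0x6767736eu; // 'ggsn', cf. LLAMA_SESSION_MAGIC in llama.h below
        const uint32_t version = 1;           // cf. LLAMA_SESSION_VERSION
        data_ctx.write(&magic,   sizeof(magic));
        data_ctx.write(&version, sizeof(version));
    }

    // uint8_t buf[8];
    // llama_data_buffer_context data_ctx(buf);
    // write_session_magic(data_ctx);   // data_ctx.get_size_written() == 8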
257
llama.h
@@ -34,29 +34,18 @@
# define DEPRECATED(func, hint) func
#endif

-#define LLAMA_FILE_MAGIC_GGJT 0x67676a74u // 'ggjt'
+#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
-#define LLAMA_FILE_MAGIC_GGLA 0x67676c61u // 'ggla'
-#define LLAMA_FILE_MAGIC_GGMF 0x67676d66u // 'ggmf'
-#define LLAMA_FILE_MAGIC_GGML 0x67676d6cu // 'ggml'
#define LLAMA_FILE_MAGIC_GGSN 0x6767736eu // 'ggsn'

-#define LLAMA_FILE_VERSION 3
-#define LLAMA_FILE_MAGIC LLAMA_FILE_MAGIC_GGJT
-#define LLAMA_FILE_MAGIC_UNVERSIONED LLAMA_FILE_MAGIC_GGML
#define LLAMA_SESSION_MAGIC   LLAMA_FILE_MAGIC_GGSN
#define LLAMA_SESSION_VERSION 1

-#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
-
#if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL)
// Defined when llama.cpp is compiled with support for offloading model layers to GPU.
#define LLAMA_SUPPORTS_GPU_OFFLOAD
#endif

-#ifndef LLAMA_DEFAULT_RMS_EPS
-#define LLAMA_DEFAULT_RMS_EPS 5e-6f
-#endif
-
#ifdef __cplusplus
extern "C" {
#endif
@@ -72,6 +61,52 @@ extern "C" {

typedef int llama_token;

+enum llama_log_level {
+    LLAMA_LOG_LEVEL_ERROR = 2,
+    LLAMA_LOG_LEVEL_WARN  = 3,
+    LLAMA_LOG_LEVEL_INFO  = 4
+};
+
+enum llama_vocab_type {
+    LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
+    LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
+};
+
+enum llama_token_type {
+    LLAMA_TOKEN_TYPE_UNDEFINED    = 0,
+    LLAMA_TOKEN_TYPE_NORMAL       = 1,
+    LLAMA_TOKEN_TYPE_UNKNOWN      = 2,
+    LLAMA_TOKEN_TYPE_CONTROL      = 3,
+    LLAMA_TOKEN_TYPE_USER_DEFINED = 4,
+    LLAMA_TOKEN_TYPE_UNUSED       = 5,
+    LLAMA_TOKEN_TYPE_BYTE         = 6,
+};
+
+// model file types
+enum llama_ftype {
+    LLAMA_FTYPE_ALL_F32              = 0,
+    LLAMA_FTYPE_MOSTLY_F16           = 1, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_0          = 2, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_1          = 3, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+    // LLAMA_FTYPE_MOSTLY_Q4_2       = 5, // support has been removed
+    // LLAMA_FTYPE_MOSTLY_Q4_3       = 6, // support has been removed
+    LLAMA_FTYPE_MOSTLY_Q8_0          = 7, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q5_0          = 8, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q5_1          = 9, // except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q2_K          = 10,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q3_K_S        = 11,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q3_K_M        = 12,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q3_K_L        = 13,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_K_S        = 14,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q4_K_M        = 15,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q5_K_S        = 16,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q5_K_M        = 17,// except 1d tensors
+    LLAMA_FTYPE_MOSTLY_Q6_K          = 18,// except 1d tensors
+
+    LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
+};
+
typedef struct llama_token_data {
    llama_token id; // token id
    float logit;    // log-odds of the token
@@ -86,25 +121,10 @@ extern "C" {

typedef void (*llama_progress_callback)(float progress, void *ctx);

-enum llama_log_level {
-    LLAMA_LOG_LEVEL_ERROR = 2,
-    LLAMA_LOG_LEVEL_WARN = 3,
-    LLAMA_LOG_LEVEL_INFO = 4
-};
-
-// Signature for logging events
-// Note that text includes the new line character at the end for most events.
-// If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
-// if it exists.
-// It might not exist for progress report where '.' is output repeatedly.
-typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
-
struct llama_context_params {
    uint32_t seed;    // RNG seed, -1 for random
    int32_t  n_ctx;   // text context
    int32_t  n_batch; // prompt processing batch size
-    int32_t  n_gqa;         // grouped-query attention (TEMP - will be moved to model hparams)
-    float    rms_norm_eps;  // rms norm epsilon (TEMP - will be moved to model hparams)
    int32_t  n_gpu_layers;  // number of layers to store in VRAM
    int32_t  main_gpu;      // the GPU that is used for scratch and small tensors

@@ -129,28 +149,13 @@ extern "C" {
    bool use_mlock;  // force system to keep model in RAM
    bool embedding;  // embedding mode only
};
-// model file types
-enum llama_ftype {
-    LLAMA_FTYPE_ALL_F32 = 0,
-    LLAMA_FTYPE_MOSTLY_F16 = 1,           // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_0 = 2,          // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_1 = 3,          // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
-    // LLAMA_FTYPE_MOSTLY_Q4_2 = 5,       // support has been removed
-    // LLAMA_FTYPE_MOSTLY_Q4_3 = 6,       // support has been removed
-    LLAMA_FTYPE_MOSTLY_Q8_0 = 7,          // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q5_0 = 8,          // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q5_1 = 9,          // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q2_K = 10,         // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q3_K_S = 11,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q3_K_M = 12,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q3_K_L = 13,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_K_S = 14,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_K_M = 15,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q5_K_S = 16,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q5_K_M = 17,       // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q6_K = 18,         // except 1d tensors
-};
+// Signature for logging events
+// Note that text includes the new line character at the end for most events.
+// If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
+// if it exists.
+// It might not exist for progress report where '.' is output repeatedly.
+typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);

// model quantization parameters
typedef struct llama_model_quantize_params {
@@ -208,27 +213,16 @@ extern "C" {
    int32_t n_eval;
};

-// Set callback for all future logging events.
-// If this is not called, or NULL is supplied, everything is output on stderr.
-LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
-
-LLAMA_API int llama_max_devices();
-
-LLAMA_API struct llama_context_params llama_context_default_params();
-LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
-
-LLAMA_API bool llama_mmap_supported();
-LLAMA_API bool llama_mlock_supported();
-
-// TODO: not great API - very likely to change
+LLAMA_API struct llama_context_params llama_context_default_params(void);
+LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params(void);

// Initialize the llama + ggml backend
// If numa is true, use NUMA optimizations
// Call once at the start of the program
LLAMA_API void llama_backend_init(bool numa);
-// Call once at the end of the program - currently only used for MPI
-LLAMA_API void llama_backend_free();
-
-LLAMA_API int64_t llama_time_us();
+// Call once at the end of the program - currently only used for MPI
+LLAMA_API void llama_backend_free(void);

LLAMA_API struct llama_model * llama_load_model_from_file(
    const char * path_model,

@@ -240,17 +234,26 @@ extern "C" {
    struct llama_model * model,
    struct llama_context_params params);

-// Various functions for loading a ggml llama model.
-// Allocate (almost) all memory needed for the model.
-// Return NULL on failure
-LLAMA_API DEPRECATED(struct llama_context * llama_init_from_file(
-    const char * path_model,
-    struct llama_context_params params),
-    "please use llama_load_model_from_file combined with llama_new_context_with_model instead");

// Frees all allocated memory
LLAMA_API void llama_free(struct llama_context * ctx);

+LLAMA_API int64_t llama_time_us(void);
+
+LLAMA_API int  llama_max_devices (void);
+LLAMA_API bool llama_mmap_supported (void);
+LLAMA_API bool llama_mlock_supported(void);
+
+LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
+LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
+LLAMA_API int llama_n_embd (const struct llama_context * ctx);
+
+LLAMA_API int llama_model_n_vocab(const struct llama_model * model);
+LLAMA_API int llama_model_n_ctx  (const struct llama_model * model);
+LLAMA_API int llama_model_n_embd (const struct llama_model * model);
+
+// Get a string describing the model type
+LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
+
// Returns 0 on success
LLAMA_API int llama_model_quantize(
    const char * fname_inp,
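Aside (not part of the diff): the deprecation message above points to the two-step load that the new API expects. A minimal sketch, with the model path purely illustrative and error handling omitted:

    llama_backend_init(false /* numa */);

    struct llama_context_params params = llama_context_default_params();

    struct llama_model   * model = llama_load_model_from_file("models/7B/ggml-model.gguf", params);
    struct llama_context * ctx   = llama_new_context_with_model(model, params);

    // ... use ctx ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();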
@@ -324,11 +327,40 @@ extern "C" {
// IMPORTANT: do not use for anything else other than debugging and testing!
LLAMA_API int llama_eval_export(struct llama_context * ctx, const char * fname);

+// Token logits obtained from the last call to llama_eval()
+// The logits for the last token are stored in the last row
+// Can be mutated in order to change the probabilities of the next token
+// Rows: n_tokens
+// Cols: n_vocab
+LLAMA_API float * llama_get_logits(struct llama_context * ctx);
+
+// Get the embeddings for the input
+// shape: [n_embd] (1-dimensional)
+LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
+
+//
+// Vocab
+//
+
+LLAMA_API const char * llama_token_get_text(const struct llama_context * ctx, llama_token token);
+
+LLAMA_API float llama_token_get_score(const struct llama_context * ctx, llama_token token);
+
+LLAMA_API llama_token_type llama_token_get_type(const struct llama_context * ctx, llama_token token);
+
+// Special tokens
+LLAMA_API llama_token llama_token_bos(const struct llama_context * ctx); // beginning-of-sentence
+LLAMA_API llama_token llama_token_eos(const struct llama_context * ctx); // end-of-sentence
+LLAMA_API llama_token llama_token_nl (const struct llama_context * ctx); // next-line
+
+//
+// Tokenization
+//
+
// Convert the provided text into tokens.
// The tokens pointer must be large enough to hold the resulting tokens.
// Returns the number of tokens on success, no more than n_max_tokens
// Returns a negative number on failure - the number of tokens that would have been returned
-// TODO: not sure if correct
LLAMA_API int llama_tokenize(
    struct llama_context * ctx,
    const char * text,
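Aside (not part of the diff): given the Rows/Cols layout documented above, the scores for the next token can be read from the last row of the logits. A minimal sketch; n_tokens stands for the number of tokens passed to the most recent llama_eval call:

    const int     n_vocab = llama_n_vocab(ctx);
    const float * logits  = llama_get_logits(ctx);
    const float * last    = logits + (size_t) (n_tokens - 1) * n_vocab; // n_vocab values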
@@ -336,6 +368,13 @@ extern "C" {
    int n_max_tokens,
    bool add_bos);

+LLAMA_API int llama_tokenize_bpe(
+    struct llama_context * ctx,
+    const char * text,
+    llama_token * tokens,
+    int n_max_tokens,
+    bool add_bos);
+
LLAMA_API int llama_tokenize_with_model(
    const struct llama_model * model,
    const char * text,
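Aside (not part of the diff): because a negative return value reports how many tokens would have been produced, llama_tokenize can be called in two passes when the count is not known up front. A minimal sketch, assuming text is a NUL-terminated C string:

    std::vector<llama_token> tokens(64);
    int n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), /*add_bos=*/true);
    if (n < 0) {
        tokens.resize(-n); // grow to the reported required size
        n = llama_tokenize(ctx, text, tokens.data(), (int) tokens.size(), true);
    }
    tokens.resize(n);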
@@ -343,57 +382,30 @@ extern "C" {
    int n_max_tokens,
    bool add_bos);

-LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
-LLAMA_API int llama_n_ctx  (const struct llama_context * ctx);
-LLAMA_API int llama_n_embd (const struct llama_context * ctx);
-
-LLAMA_API int llama_n_vocab_from_model(const struct llama_model * model);
-LLAMA_API int llama_n_ctx_from_model  (const struct llama_model * model);
-LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
-
-LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
-
-// Get the vocabulary as output parameters.
-// Returns number of results.
-LLAMA_API int llama_get_vocab(
-    const struct llama_context * ctx,
-    const char * * strings,
-    float * scores,
-    int capacity);
-
-LLAMA_API int llama_get_vocab_from_model(
-    const struct llama_model * model,
-    const char * * strings,
-    float * scores,
-    int capacity);
-
-// Token logits obtained from the last call to llama_eval()
-// The logits for the last token are stored in the last row
-// Can be mutated in order to change the probabilities of the next token
-// Rows: n_tokens
-// Cols: n_vocab
-LLAMA_API float * llama_get_logits(struct llama_context * ctx);
-
-// Get the embeddings for the input
-// shape: [n_embd] (1-dimensional)
-LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
-
// Token Id -> String. Uses the vocabulary in the provided context
-LLAMA_API const char * llama_token_to_str(
+// Does not write null terminator to the buffer
+LLAMA_API int llama_token_to_str(
    const struct llama_context * ctx,
-    llama_token token);
+    llama_token token,
+    char * buf,
+    int length);

-LLAMA_API const char * llama_token_to_str_with_model(
+LLAMA_API int llama_token_to_str_bpe(
+    const struct llama_context * ctx,
+    llama_token token,
+    char * buf,
+    int length);
+
+LLAMA_API int llama_token_to_str_with_model(
    const struct llama_model * model,
-    llama_token token);
+    llama_token token,
+    char * buf,
+    int length);

-// Special tokens
-LLAMA_API llama_token llama_token_bos(); // beginning-of-sentence
-LLAMA_API llama_token llama_token_eos(); // end-of-sentence
-LLAMA_API llama_token llama_token_nl();  // next-line
-
+//
// Grammar
//

LLAMA_API struct llama_grammar * llama_grammar_init(
    const llama_grammar_element ** rules,
    size_t n_rules,
@@ -401,7 +413,9 @@ extern "C" {

LLAMA_API void llama_grammar_free(struct llama_grammar * grammar);

+//
// Sampling functions
+//

/// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);

@@ -470,6 +484,10 @@ extern "C" {
// Print system information
LLAMA_API const char * llama_print_system_info(void);

+// Set callback for all future logging events.
+// If this is not called, or NULL is supplied, everything is output on stderr.
+LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
#ifdef __cplusplus
}
#endif
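Aside (not part of the diff): a minimal callback matching the llama_log_callback signature declared earlier in this header, installed through the llama_log_set added above:

    static void my_log_callback(enum llama_log_level level, const char * text, void * user_data) {
        (void) level;
        (void) user_data;
        fputs(text, stderr); // per the typedef's comment, text normally already ends with '\n'
    }

    // at start-up:
    // llama_log_set(my_log_callback, NULL);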
@@ -479,10 +497,11 @@

#include <vector>
#include <string>

struct ggml_tensor;

const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

-#endif
+#endif // LLAMA_API_INTERNAL

#endif // LLAMA_H
1
models/.editorconfig
Normal file
@@ -0,0 +1 @@
+root = true

BIN
models/ggml-vocab-llama.gguf
Normal file
Binary file not shown.

0
scripts/get-wikitext-2.sh
Normal file → Executable file
@@ -1,6 +1,7 @@
#!/bin/bash

cp -rpv ../ggml/src/ggml.c ./ggml.c
+cp -rpv ../ggml/src/ggml-alloc.c ./ggml-alloc.c
cp -rpv ../ggml/src/ggml-cuda.h ./ggml-cuda.h
cp -rpv ../ggml/src/ggml-cuda.cu ./ggml-cuda.cu
cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h
@@ -9,6 +10,7 @@ cp -rpv ../ggml/src/ggml-metal.h ./ggml-metal.h
cp -rpv ../ggml/src/ggml-metal.m ./ggml-metal.m
cp -rpv ../ggml/src/ggml-metal.metal ./ggml-metal.metal
cp -rpv ../ggml/include/ggml/ggml.h ./ggml.h
+cp -rpv ../ggml/include/ggml/ggml-alloc.h ./ggml-alloc.h

cp -rpv ../ggml/tests/test-opt.cpp ./tests/test-opt.cpp
cp -rpv ../ggml/tests/test-grad0.cpp ./tests/test-grad0.cpp
@@ -1,17 +1,36 @@
-function(llama_add_test source)
+function(llama_build_executable source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    install(TARGETS ${TEST_TARGET} RUNTIME)
-    target_link_libraries(${TEST_TARGET} PRIVATE llama)
+    target_link_libraries(${TEST_TARGET} PRIVATE llama common)
+endfunction()
+
+function(llama_test_executable name source)
+    get_filename_component(TEST_TARGET ${source} NAME_WE)
+    # add_executable(${TEST_TARGET} ${source})
+    # install(TARGETS ${TEST_TARGET} RUNTIME)
+    # target_link_libraries(${TEST_TARGET} PRIVATE llama)
+    add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+endfunction()
+
+function(llama_build_and_test_executable source)
+    get_filename_component(TEST_TARGET ${source} NAME_WE)
+    add_executable(${TEST_TARGET} ${source})
+    install(TARGETS ${TEST_TARGET} RUNTIME)
+    target_link_libraries(${TEST_TARGET} PRIVATE llama common)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()

-# llama_add_test(test-double-float.cpp) # SLOW
-llama_add_test(test-quantize-fns.cpp)
-llama_add_test(test-quantize-perf.cpp)
-llama_add_test(test-sampling.cpp)
-llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
-llama_add_test(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp)
-llama_add_test(test-llama-grammar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/common.cpp)
-llama_add_test(test-grad0.cpp) # SLOW
-# llama_add_test(test-opt.cpp) # SLOW
+# llama_build_and_test_executable(test-double-float.cpp) # SLOW
+llama_build_and_test_executable(test-quantize-fns.cpp)
+llama_build_and_test_executable(test-quantize-perf.cpp)
+llama_build_and_test_executable(test-sampling.cpp)
+llama_build_executable(test-tokenizer-0.cpp)
+llama_test_executable (test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+llama_build_executable(test-tokenizer-1.cpp)
+llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+#llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
+llama_build_and_test_executable(test-grammar-parser.cpp)
+llama_build_and_test_executable(test-llama-grammar.cpp)
+llama_build_and_test_executable(test-grad0.cpp) # SLOW
+# llama_build_and_test_executable(test-opt.cpp) # SLOW
@@ -3,7 +3,8 @@
#endif

#include "llama.h"
-#include "examples/grammar-parser.cpp"
+#include "grammar-parser.h"

#include <cassert>

int main()
@@ -2,9 +2,9 @@
#undef NDEBUG
#endif

-#include "llama.cpp"
-#include "examples/common.cpp"
-#include "examples/grammar-parser.cpp"
+#include "llama.cpp" // TODO: not great
+#include "grammar-parser.h"

#include <cassert>

int main()
@@ -1,22 +1,55 @@
#include "llama.h"
+#include "common.h"

#include <cstdio>
#include <string>
#include <map>
#include <vector>

-static const std::map<std::string, std::vector<llama_token>> & k_tests()
-{
+static std::string unescape_whitespace(llama_context* ctx, const std::vector<llama_token>& tokens) {
+    std::string result;
+    for (size_t i = 0; i < tokens.size(); ++i) {
+        result += llama_token_to_str(ctx, tokens[i]);
+    }
+    return result;
+}
+
+static const std::map<std::string, std::vector<llama_token>> & k_tests() {
    static std::map<std::string, std::vector<llama_token>> _k_tests = {
-        { "Hello World",        { 1, 10994, 2787, }, },
-        { " Hello World",       { 1, 15043, 2787, }, },
-        { " Hello World!",      { 1, 15043, 2787, 29991, }, },
-        { " this is 🦙.cpp",    { 1, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
-        { "w048 7tuijk dsdfhu", { 1, 29893, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
-        { "нещо на Български",  { 1, 821, 4851, 665, 1386, 29713, 1305, }, },
+        { " ",                  { 1, 259, }, },
+        { " ",                  { 1, 1678, }, },
+        { " ",                  { 1, 268, }, },
+        { "\t",                 { 1, 29871, 12, }, },
+        { "\n",                 { 1, 29871, 13, }, },
+        { "\t\n",               { 1, 29871, 12, 13, }, },
+        { "Hello world",        { 1, 15043, 3186, }, },
+        { " Hello world",       { 1, 29871, 15043, 3186, }, },
+        { "Hello World",        { 1, 15043, 2787, }, },
+        { " Hello World",       { 1, 29871, 15043, 2787, }, },
+        { " Hello World!",      { 1, 29871, 15043, 2787, 29991, }, },
+        { " this is 🦙.cpp",    { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
+        { "w048 7tuijk dsdfhu", { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
+        { "нещо на Български",  { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
+        { "កាន់តែពិសេសអាចខលចេញ", { 1, 29871, 31849, 31324, 31934, 228, 162, 142, 228, 161,
+                                   146, 228, 162, 133, 228, 161, 153, 228, 161, 186,
+                                   31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228,
+                                   161, 136, 228, 161, 132, 228, 161, 158, 228, 161,
+                                   136, 228, 162, 132, 228, 161, 140, }, },
+        { "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
+          { 1, 29871, 243, 162, 157, 131, 313, 8945, 29897, 29871,
+            243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
+            313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
+            313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
+        { "Hello",              { 1, 15043 }, },
+        { " Hello",             { 1, 29871, 15043 }, },
+        { " Hello",             { 1, 259, 15043 }, },
+        { " Hello",             { 1, 1678, 15043 }, },
+        { " Hello",             { 1, 268, 15043 }, },
+        { " Hello\n Hello",     { 1, 268, 15043, 13, 1678, 15043 }, },
    };

    return _k_tests;
-};
+}

int main(int argc, char **argv) {
    if (argc < 2) {
@@ -64,10 +97,12 @@ int main(int argc, char **argv) {
        return 2;
    }

+    bool success = true;
+
    for (const auto & test_kv : k_tests()) {
-        std::vector<llama_token> res(test_kv.first.size());
-        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
-        res.resize(n);
+        std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, true);
+        fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
+            __func__, test_kv.first.c_str(), unescape_whitespace(ctx, res).c_str());

        bool correct = res.size() == test_kv.second.size();

@@ -79,6 +114,8 @@ int main(int argc, char **argv) {

        if (!correct) {
            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
+                unescape_whitespace(ctx, res).c_str(), unescape_whitespace(ctx, test_kv.second).c_str());
            fprintf(stderr, "%s : expected tokens: ", __func__);
            for (const auto & t : test_kv.second) {
                fprintf(stderr, "%6d, ", t);
@@ -90,9 +127,7 @@ int main(int argc, char **argv) {
            }
            fprintf(stderr, "\n");

-            llama_free_model(model);
-            llama_free(ctx);
-            return 3;
+            success = false;
        }
    }

@@ -101,5 +136,5 @@ int main(int argc, char **argv) {

    llama_backend_free();

-    return 0;
+    return success ? 0 : 3;
}
124
tests/test-tokenizer-1.cpp
Normal file
@@ -0,0 +1,124 @@
#include "llama.h"
#include "common.h"

#include <cassert>
#include <cstdio>
#include <cstring>
#include <string>
#include <codecvt>
#include <map>
#include <vector>
#include <locale>

static std::string escape_whitespace(const std::string& text) {
    std::string result = "\xe2\x96\x81";
    for (size_t offs = 0; offs < text.length(); ++offs) {
        if (text[offs] == ' ') {
            result += "\xe2\x96\x81";
        } else {
            result += text[offs];
        }
    }
    return result;
}

static std::string unescape_whitespace(llama_context * ctx, const std::vector<llama_token> & tokens) {
    std::string result;
    for (size_t i = 0; i < tokens.size(); ++i) {
        result += llama_token_to_str(ctx, tokens[i]);
    }
    return result;
}

int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    const std::string fname = argv[1];

    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

    llama_model * model;
    llama_context * ctx;

    llama_backend_init(false);

    // load the vocab
    {
        auto lparams = llama_context_default_params();

        lparams.vocab_only = true;

        model = llama_load_model_from_file(fname.c_str(), lparams);

        if (model == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            return 1;
        }

        ctx = llama_new_context_with_model(model, lparams);

        if (ctx == NULL) {
            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
            llama_free_model(model);
            return 1;
        }
    }

    const int n_vocab = llama_n_vocab(ctx);

    for (int i = 0; i < n_vocab; ++i) {
        std::string forward = llama_token_to_str_bpe(ctx, i);
        std::vector<llama_token> tokens = llama_tokenize_bpe(ctx, forward, false);
        if (tokens.size() == 1) {
            if (i != tokens[0]) {
                std::string backward = llama_token_to_str(ctx, tokens[0]);
                fprintf(stderr, "%s : error: token %d is string %s but bpe returns token %d %s\n",
                    __func__, i, llama_token_to_str(ctx, i).c_str(), tokens[0], backward.c_str());
                return 2;
            }
        } else {
            llama_token_type type = llama_token_get_type(ctx, i);
            if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
                fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
            } else {
                fprintf(stderr, "%s : error: token %d is string %s but bpe returns tokens %s\n",
                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
                return 2;
            }
        }
    }

#ifdef _WIN32
    std::wstring_convert<typename std::codecvt_utf8<char16_t>, char16_t> u16converter;
    for (char16_t ch = 0x0000; ch < 0xffff; ++ch) {
        std::u16string u16str(1, ch);
        std::string str = u16converter.to_bytes(u16str);
        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
        if (tokens.size() == 1) {
            fprintf(stderr, "%s : info: %s tokenized to %d \n",
                __func__, str.c_str(), tokens[0]);
        }
    }

    std::wstring_convert<typename std::codecvt_utf8<char32_t>, char32_t> u32converter;
    for (char32_t ch = 0x0000; ch < 0x0010ffff; ++ch) {
        std::u32string u32str(1, ch);
        std::string str = u32converter.to_bytes(u32str);
        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
        if (tokens.size() == 1) {
            fprintf(stderr, "%s : info: %s tokenized to %d \n", __func__, str.c_str(), tokens[0]);
        }
    }
#endif

    llama_free_model(model);
    llama_free(ctx);

    llama_backend_free();

    return 0;
}