Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-11-14 14:59:52 +00:00

Merge branch 'ggerganov:master' into master

This commit is contained in commit f9e2dc00ac
@@ -14,7 +14,9 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
 echo "GGML_SYCL_F16 is set" && \
 export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
 fi && \
-cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx ${OPT_SYCL_F16} && \
+echo "Building with static libs" && \
+cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx \
+${OPT_SYCL_F16} -DBUILD_SHARED_LIBS=OFF && \
 cmake --build build --config Release --target llama-cli

 FROM intel/oneapi-basekit:$ONEAPI_VERSION AS runtime
@@ -14,6 +14,7 @@ RUN if [ "${GGML_SYCL_F16}" = "ON" ]; then \
 echo "GGML_SYCL_F16 is set" && \
 export OPT_SYCL_F16="-DGGML_SYCL_F16=ON"; \
 fi && \
+echo "Building with dynamic libs" && \
 cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DLLAMA_CURL=ON ${OPT_SYCL_F16} && \
 cmake --build build --config Release --target llama-server

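For context, a rough sketch of how such an image might be built. The Dockerfile path, image tag, and the assumption that GGML_SYCL_F16 is exposed as a build argument are all illustrative and not taken from this diff:

```sh
# Hypothetical invocation; the -f path and tag are placeholders, not from the diff.
docker build \
  --build-arg GGML_SYCL_F16=ON \
  -t llama-cpp-sycl-cli \
  -f .devops/llama-cli-intel.Dockerfile .
```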
@@ -10,7 +10,6 @@
 "llama-embedding"
 "llama-server"
 "llama-quantize"
-"llama-train-text-from-scratch"
 ];
 mkApp = name: {
 type = "app";
@@ -13,8 +13,6 @@ elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then
 ./llama-quantize "$@"
 elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then
 ./llama-cli "$@"
-elif [[ "$arg1" == '--finetune' || "$arg1" == '-f' ]]; then
-./llama-finetune "$@"
 elif [[ "$arg1" == '--all-in-one' || "$arg1" == '-a' ]]; then
 echo "Converting PTH to GGML..."
 for i in `ls $1/$2/ggml-model-f16.bin*`; do
@@ -36,8 +34,6 @@ else
 echo " ex: --outtype f16 \"/models/7B/\" "
 echo " --quantize (-q): Optimize with quantization process ggml"
 echo " ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2"
-echo " --finetune (-f): Run finetune command to create a lora finetune of the model"
-echo " See documentation for finetune for command-line parameters"
 echo " --all-in-one (-a): Execute --convert & --quantize"
 echo " ex: \"/models/\" 7B"
 echo " --server (-s): Run a model on the server"
Makefile (30 changed lines)
@@ -11,7 +11,6 @@ BUILD_TARGETS = \
 llama-embedding \
 llama-eval-callback \
 llama-export-lora \
-llama-finetune \
 llama-gbnf-validator \
 llama-gguf \
 llama-gguf-hash \
@@ -37,7 +36,6 @@ BUILD_TARGETS = \
 llama-simple \
 llama-speculative \
 llama-tokenize \
-llama-train-text-from-scratch \
 llama-vdot \
 llama-cvector-generator \
 tests/test-c.o
@@ -64,13 +62,13 @@ TEST_TARGETS = \
 tests/test-tokenizer-1-spm

 # Legacy build targets that were renamed in #7809, but should still be removed when the project is cleaned
-LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
+LEGACY_TARGETS_CLEAN = main quantize quantize-stats perplexity imatrix embedding vdot q8dot convert-llama2c-to-ggml \
 simple batched batched-bench save-load-state server gguf gguf-split eval-callback llama-bench libllava.a llava-cli baby-llama \
-retrieval speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey gritlm
+retrieval speculative infill tokenize benchmark-matmult parallel export-lora lookahead lookup passkey gritlm

 # Legacy build targets that were renamed in #7809, but we want to build binaries that for them that output a deprecation warning if people try to use them.
 # We don't want to clutter things too much, so we only build replacements for the most commonly used binaries.
-LEGACY_TARGETS_BUILD = main quantize perplexity embedding server finetune
+LEGACY_TARGETS_BUILD = main quantize perplexity embedding server

 # Deprecation aliases
 ifdef LLAMA_CUBLAS
@@ -1296,11 +1294,6 @@ llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp \
 $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp \
-$(OBJ_ALL)
-$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-
 llama-convert-llama2c-to-ggml: examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp \
 $(OBJ_GGML) $(OBJ_LLAMA)
 $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
@@ -1316,11 +1309,6 @@ llama-baby-llama: examples/baby-llama/baby-llama.cpp \
 $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

-llama-finetune: examples/finetune/finetune.cpp \
-$(OBJ_ALL)
-$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-
 llama-export-lora: examples/export-lora/export-lora.cpp \
 $(OBJ_ALL)
 $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
@@ -1578,7 +1566,7 @@ llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \
 # Deprecated binaries that we want to keep around long enough for people to migrate to the new filenames, then these can be removed.
 #
 # Mark legacy binary targets as .PHONY so that they are always checked.
-.PHONY: main quantize perplexity embedding server finetune
+.PHONY: main quantize perplexity embedding server

 # NOTE: We currently will always build the deprecation-warning `main` and `server` binaries to help users migrate.
 # Eventually we will want to remove these target from building all the time.
@@ -1621,13 +1609,3 @@ ifneq (,$(wildcard embedding))
 @echo " Remove the 'embedding' binary to remove this warning."
 @echo "#########"
 endif
-
-finetune: examples/deprecation-warning/deprecation-warning.cpp
-ifneq (,$(wildcard finetune))
-$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
-$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-@echo "#########"
-@echo "WARNING: The 'finetune' binary is deprecated. Please use 'llama-finetune' instead."
-@echo " Remove the 'finetune' binary to remove this warning."
-@echo "#########"
-endif
@@ -138,6 +138,7 @@ Typically finetunes of the base models below are supported as well.

 Unless otherwise noted these projects are open-source with permissive licensing:

+- [MindWorkAI/AI-Studio](https://github.com/MindWorkAI/AI-Studio) (FSL-1.1-MIT)
 - [iohub/collama](https://github.com/iohub/coLLaMA)
 - [janhq/jan](https://github.com/janhq/jan) (AGPL)
 - [nat/openplayground](https://github.com/nat/openplayground)
@@ -181,6 +182,9 @@ Unless otherwise noted these projects are open-source with permissive licensing:

 - [Paddler](https://github.com/distantmagic/paddler) - Stateful load balancer custom-tailored for llama.cpp

+**Games:**
+- [Lucy's Labyrinth](https://github.com/MorganRO8/Lucys_Labyrinth) - A simple maze game where agents controlled by an AI model will try to trick you.
+
 ## Demo

 <details>
@@ -2723,7 +2723,7 @@ std::string llama_chat_format_single(const struct llama_model * model,
 const llama_chat_msg & new_msg,
 bool add_ass) {
 std::ostringstream ss;
-auto fmt_past_msg = llama_chat_apply_template(model, tmpl, past_msg, false);
+auto fmt_past_msg = past_msg.empty() ? "" : llama_chat_apply_template(model, tmpl, past_msg, false);
 std::vector<llama_chat_msg> chat_new(past_msg);
 // if the past_msg ends with a newline, we must preserve it in the formatted version
 if (add_ass && !fmt_past_msg.empty() && fmt_past_msg.back() == '\n') {
@@ -2084,6 +2084,7 @@ class Phi3MiniModel(Model):
 self.gguf_writer.add_rope_dimension_count(rope_dims)
 self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"]))
 self.gguf_writer.add_file_type(self.ftype)
+self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"]))

 # write rope scaling for long context (128k) model
 rope_scaling = self.find_hparam(['rope_scaling'], True)
@@ -293,31 +293,26 @@ Similar to the native `sycl-ls`, available SYCL devices can be queried as follow
 ```sh
 ./build/bin/llama-ls-sycl-device
 ```
-A example of such log in a system with 1 *intel CPU* and 1 *intel GPU* can look like the following:
+This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *intel GPU* it would look like the following:
 ```
-found 6 SYCL devices:
+found 2 SYCL devices:

 | | | |Compute |Max compute|Max work|Max sub| |
 |ID| Device Type| Name|capability|units |group |group |Global mem size|
 |--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------|
 | 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136|
 | 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
-| 2| [opencl:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 3.0| 512| 1024| 32| 16225243136|
-| 3| [opencl:gpu:1]| Intel(R) UHD Graphics 770| 3.0| 32| 512| 32| 53651849216|
-| 4| [opencl:cpu:0]| 13th Gen Intel(R) Core(TM) i7-13700K| 3.0| 24| 8192| 64| 67064815616|
-| 5| [opencl:acc:0]| Intel(R) FPGA Emulation Device| 1.2| 24|67108864| 64| 67064815616|
 ```

-| Attribute | Note |
-|------------------------|-------------------------------------------------------------|
-| compute capability 1.3 | Level-zero driver/runtime, recommended |
-| compute capability 3.0 | OpenCL driver/runtime, slower than level-zero in most cases |

 4. Launch inference

 There are two device selection modes:

 - Single device: Use one device target specified by the user.
-- Multiple devices: Automatically select the devices with the same largest Max compute-units.
+- Multiple devices: Automatically choose the devices with the same backend.

+In two device selection modes, the default SYCL backend is level_zero, you can choose other backend supported by SYCL by setting environment variable ONEAPI_DEVICE_SELECTOR.

 | Device selection | Parameter |
 |------------------|----------------------------------------|
@@ -474,33 +469,26 @@ Similar to the native `sycl-ls`, available SYCL devices can be queried as follow
 build\bin\ls-sycl-device.exe
 ```

-The output of this command in a system with 1 *intel CPU* and 1 *intel GPU* would look like the following:
+This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. For example, in a system with 2 *intel GPU* it would look like the following:
 ```
-found 6 SYCL devices:
+found 2 SYCL devices:
 | | | |Compute |Max compute|Max work|Max sub| |
 |ID| Device Type| Name|capability|units |group |group |Global mem size|
 |--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------|
 | 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136|
 | 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216|
-| 2| [opencl:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 3.0| 512| 1024| 32| 16225243136|
-| 3| [opencl:gpu:1]| Intel(R) UHD Graphics 770| 3.0| 32| 512| 32| 53651849216|
-| 4| [opencl:cpu:0]| 13th Gen Intel(R) Core(TM) i7-13700K| 3.0| 24| 8192| 64| 67064815616|
-| 5| [opencl:acc:0]| Intel(R) FPGA Emulation Device| 1.2| 24|67108864| 64| 67064815616|
-
 ```

-| Attribute | Note |
-|------------------------|-----------------------------------------------------------|
-| compute capability 1.3 | Level-zero running time, recommended |
-| compute capability 3.0 | OpenCL running time, slower than level-zero in most cases |
-
 4. Launch inference

 There are two device selection modes:

-- Single device: Use one device assigned by user.
-- Multiple devices: Automatically choose the devices with the same biggest Max compute units.
+- Single device: Use one device assigned by user. Default device id is 0.
+- Multiple devices: Automatically choose the devices with the same backend.

+In two device selection modes, the default SYCL backend is level_zero, you can choose other backend supported by SYCL by setting environment variable ONEAPI_DEVICE_SELECTOR.

 | Device selection | Parameter |
 |------------------|----------------------------------------|
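The two SYCL.md hunks above both point at ONEAPI_DEVICE_SELECTOR as the way to pick a non-default backend. A minimal sketch, assuming a level_zero-capable system; the exact selector values depend on the installed drivers and are illustrative, not documented output:

```sh
# List devices for the default level_zero backend
./build/bin/llama-ls-sycl-device

# Ask the SYCL runtime to expose only OpenCL devices instead (illustrative selector value)
ONEAPI_DEVICE_SELECTOR="opencl:*" ./build/bin/llama-ls-sycl-device

# Or pin a single level_zero GPU by index
ONEAPI_DEVICE_SELECTOR="level_zero:0" ./build/bin/llama-ls-sycl-device
```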
@@ -21,7 +21,6 @@ else()
 add_subdirectory(embedding)
 add_subdirectory(eval-callback)
 add_subdirectory(export-lora)
-add_subdirectory(finetune)
 add_subdirectory(gbnf-validator)
 add_subdirectory(gguf-hash)
 add_subdirectory(gguf-split)
@@ -53,5 +52,4 @@ else()
 add_subdirectory(simple)
 add_subdirectory(speculative)
 add_subdirectory(tokenize)
-add_subdirectory(train-text-from-scratch)
 endif()
@@ -13,7 +13,6 @@ Please update all scripts and workflows to use the new binary names.
 | server | llama-server |
 | llama-bench | llama-bench |
 | embedding | llama-embedding |
-| finetune | llama-finetune |
 | quantize | llama-quantize |
 | tokenize | llama-tokenize |
 | export-lora | llama-export-lora |
@@ -45,7 +44,6 @@ Please update all scripts and workflows to use the new binary names.
 | save-load-state | llama-save-load-state |
 | simple | llama-simple |
 | speculative | llama-speculative |
-| train-text-from-scratch | llama-train-text-from-scratch |
 | vdot | llama-vdot |
 | tests/test-c.o | tests/test-c.o |

@@ -19,7 +19,15 @@ For example:
 ./bin/llama-export-lora \
 -m open-llama-3b-v2-q8_0.gguf \
 -o open-llama-3b-v2-q8_0-english2tokipona-chat.gguf \
---lora lora-open-llama-3b-v2-q8_0-english2tokipona-chat-LATEST.bin
+--lora lora-open-llama-3b-v2-q8_0-english2tokipona-chat-LATEST.gguf
 ```

-Multiple LORA adapters can be applied by passing multiple `--lora FNAME` or `--lora-scaled FNAME S` command line parameters.
+Multiple LORA adapters can be applied by passing multiple `--lora FNAME` or `--lora-scaled FNAME S` command line parameters:
+
+```bash
+./bin/llama-export-lora \
+-m your_base_model.gguf \
+-o your_merged_model.gguf \
+--lora-scaled lora_task_A.gguf 0.5 \
+--lora-scaled lora_task_B.gguf 0.5
+```
@@ -1,5 +0,0 @@ (deleted file)
set(TARGET llama-finetune)
add_executable(${TARGET} finetune.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
@@ -1,90 +0,0 @@ (deleted file)
# finetune

Basic usage instructions:

```bash
# get training data
wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt

# finetune LORA adapter
./bin/llama-finetune \
        --model-base open-llama-3b-v2-q8_0.gguf \
        --checkpoint-in chk-lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.gguf \
        --checkpoint-out chk-lora-open-llama-3b-v2-q8_0-shakespeare-ITERATION.gguf \
        --lora-out lora-open-llama-3b-v2-q8_0-shakespeare-ITERATION.bin \
        --train-data "shakespeare.txt" \
        --save-every 10 \
        --threads 6 --adam-iter 30 --batch 4 --ctx 64 \
        --use-checkpointing

# predict
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf --lora lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin
```

**Only llama based models are supported!** The output files will be saved every N iterations (config with `--save-every N`).
The pattern 'ITERATION' in the output filenames will be replaced with the iteration number and with 'LATEST' for the latest output.
So in above example after 10 iterations these files will be written:
- chk-lora-open-llama-3b-v2-q8_0-shakespeare-10.gguf
- chk-lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.gguf
- lora-open-llama-3b-v2-q8_0-shakespeare-10.bin
- lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin

After 10 more iterations:
- chk-lora-open-llama-3b-v2-q8_0-shakespeare-20.gguf
- chk-lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.gguf
- lora-open-llama-3b-v2-q8_0-shakespeare-20.bin
- lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin

Checkpoint files (`--checkpoint-in FN`, `--checkpoint-out FN`) store the training process. When the input checkpoint file does not exist, it will begin finetuning a new randomly initialized adapter.

llama.cpp compatible LORA adapters will be saved with filename specified by `--lora-out FN`.
These LORA adapters can then be used by `llama-cli` together with the base model, like in the 'predict' example command above.

In `llama-cli` you can also load multiple LORA adapters, which will then be mixed together.

For example if you have two LORA adapters `lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin` and `lora-open-llama-3b-v2-q8_0-bible-LATEST.bin`, you can mix them together like this:

```bash
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf \
  --lora lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin \
  --lora lora-open-llama-3b-v2-q8_0-bible-LATEST.bin
```

You can change how strong each LORA adapter is applied to the base model by using `--lora-scaled FN SCALE` instead of `--lora FN`.

For example to apply 40% of the 'shakespeare' LORA adapter, 80% of the 'bible' LORA adapter and 100% of yet another one:

```bash
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf \
  --lora-scaled lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin 0.4 \
  --lora-scaled lora-open-llama-3b-v2-q8_0-bible-LATEST.bin 0.8 \
  --lora lora-open-llama-3b-v2-q8_0-yet-another-one-LATEST.bin
```

The scale numbers don't need to add up to one, and you can also use numbers greater than 1 to further increase the influence of an adapter. But making the values too big will sometimes result in worse output. Play around to find good values.

Gradient checkpointing reduces the memory requirements by ~50% but increases the runtime.
If you have enough RAM, you can make finetuning a bit faster by disabling checkpointing with `--no-checkpointing`.

The default LORA rank can be specified with `--lora-r N`.
The LORA rank can be configured for each model tensor type separately with these command line options:

```bash
  --lora-r N         LORA r: default rank. Also specifies resulting scaling together with lora-alpha. (default 4)
  --rank-att-norm N  LORA rank for attention norm tensor (default 1)
  --rank-ffn-norm N  LORA rank for feed-forward norm tensor (default 1)
  --rank-out-norm N  LORA rank for output norm tensor (default 1)
  --rank-tok-embd N  LORA rank for token embeddings tensor (default 4)
  --rank-out N       LORA rank for output tensor (default 4)
  --rank-wq N        LORA rank for wq tensor (default 4)
  --rank-wk N        LORA rank for wk tensor (default 4)
  --rank-wv N        LORA rank for wv tensor (default 4)
  --rank-wo N        LORA rank for wo tensor (default 4)
  --rank-ffn_gate N  LORA rank for ffn_gate tensor (default 4)
  --rank-ffn_down N  LORA rank for ffn_down tensor (default 4)
  --rank-ffn_up N    LORA rank for ffn_up tensor (default 4)
```

The LORA rank of 'norm' tensors should always be 1.

To see all available options use `llama-finetune --help`.
@@ -1,487 +0,0 @@ (deleted file)
#!/usr/bin/env python3
# finetune checkpoint --> gguf conversion

import argparse
import gguf
import struct
import numpy as np
from pathlib import Path

# gguf constants
LLM_KV_OPTIMIZER_TYPE = "optimizer.type"
LLM_KV_OPTIMIZER_TYPE_ADAM = "adam"
LLM_KV_OPTIMIZER_TYPE_LBFGS = "lbfgs"
LLM_KV_OPTIMIZER_FILE_VERSION = "optimizer.file_version"
LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT = "optimizer.convergence_past_count"
LLM_KV_OPTIMIZER_PARAMETER_COUNT = "optimizer.parameter_count"
LLM_KV_OPTIMIZER_ITERATION_COUNT = "optimizer.iteration_count"
LLM_KV_OPTIMIZER_JUST_INITIALIZED = "optimizer.just_initialized"
LLM_KV_OPTIMIZER_ADAM_BEST_LOSS = "optimizer.adam.best_loss"
LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS = "optimizer.adam.previous_loss"
LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT = "optimizer.adam.no_improvement_count"
LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT = "optimizer.lbfgs.approx_hessian_count"
LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS = "optimizer.lbfgs.best_loss"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP = "optimizer.lbfgs.line_search_step"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J = "optimizer.lbfgs.line_search_j"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K = "optimizer.lbfgs.line_search_k"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END = "optimizer.lbfgs.line_search_end"
LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT = "optimizer.lbfgs.no_improvement_count"

LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS = "optimizer.adam.first_moments"
LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS = "optimizer.adam.second_moments"
LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES = "optimizer.adam.past_loss_values"

LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS = "optimizer.lbfgs.current_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS = "optimizer.lbfgs.previous_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS = "optimizer.lbfgs.current_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS = "optimizer.lbfgs.previous_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION = "optimizer.lbfgs.search_direction"
LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES = "optimizer.lbfgs.past_loss_values"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA = "optimizer.lbfgs.memory_alpha"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS = "optimizer.lbfgs.memory_ys"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S = "optimizer.lbfgs.memory_s"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y = "optimizer.lbfgs.memory_y"

LLM_KV_TRAINING_TYPE_TRAIN_MODEL = "train_model"
LLM_KV_TRAINING_TYPE_FINETUNE_LORA = "finetune_lora"
LLM_KV_TRAINING_TYPE = "training.type"
LLM_KV_TRAINING_FILE_VERSION = "training.file_version"
LLM_KV_TRAINING_ITERATION_COUNT = "training.iteration_count"
LLM_KV_TRAINING_SAMPLE_COUNT = "training.sample_count"
LLM_KV_TRAINING_TOKEN_COUNT = "training.token_count"

LLM_KV_TRAINING_LORA_RANK_TOKEN_EMBD = "training.lora.rank.token_embd"
LLM_KV_TRAINING_LORA_RANK_OUTPUT_NORM = "training.lora.rank.output_norm"
LLM_KV_TRAINING_LORA_RANK_OUTPUT = "training.lora.rank.output"
LLM_KV_TRAINING_LORA_RANK_ATTN_NORM = "training.lora.rank.attn_norm"
LLM_KV_TRAINING_LORA_RANK_ATTN_Q = "training.lora.rank.attn_q"
LLM_KV_TRAINING_LORA_RANK_ATTN_K = "training.lora.rank.attn_k"
LLM_KV_TRAINING_LORA_RANK_ATTN_V = "training.lora.rank.attn_v"
LLM_KV_TRAINING_LORA_RANK_ATTN_OUT = "training.lora.rank.attn_output"
LLM_KV_TRAINING_LORA_RANK_FFN_NORM = "training.lora.rank.ffn_norm"
LLM_KV_TRAINING_LORA_RANK_FFN_GATE = "training.lora.rank.ffn_gate"
LLM_KV_TRAINING_LORA_RANK_FFN_DOWN = "training.lora.rank.ffn_down"
LLM_KV_TRAINING_LORA_RANK_FFN_UP = "training.lora.rank.ffn_up"

class Tensor:
    def __init__(self, dtype='f', ne=None):
        if ne is None:
            ne = []
        self.dtype = dtype
        self.ne = ne
        self.nbytes = 0
        if self.dtype == 'f':
            if len(self.ne) == 0:
                self.nbytes = 0
            else:
                self.nbytes = int(np.prod(self.ne)) * 4
        else:
            raise ValueError(f"Unhandled data type '{self.dtype}'")

    def load(self, data, offset):
        nd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        namelen = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        dtype = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4

        assert(nd == len(self.ne))
        ne = []
        for d in range(nd):
            n = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
            ne.append(n)

        if tuple(ne) != tuple(self.ne):
            raise ValueError(f"Tensor.load: Expected number of elements {str(self.ne)} does not match what is read from file {str(ne)}")

        if self.dtype == 'f':
            assert(dtype == 0)
        else:
            raise ValueError(f"Unhandled data type '{self.dtype}'")

        self.name = bytes(data[offset:offset+namelen]); offset += namelen
        # 32-byte alignment
        offset += (0 - offset) & 31
        self.data = data[offset:offset+self.nbytes]
        offset += self.nbytes
        return offset

    def max_storage_size(self):
        result = 0
        result += 4 # nd
        result += 4 # namelen
        result += 4 # dtype
        result += len(self.ne)*8 # ne
        result += 48 # name (maximum as of commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9)
        result += 31 # 32-byte alignment
        result += self.nbytes
        return result

    def save_gguf(self, gguf_writer, name):
        gguf_writer.add_tensor(
            name=name,
            tensor=self.data,
            raw_shape=np.array(list(reversed(self.ne))),
            raw_dtype=gguf.GGMLQuantizationType.F32)

class OptimizationContext:
    def __init__(self):
        pass

    def load(self, data, offset):
        self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]
        offset += 4

        if self.version != 1:
            raise ValueError('Invalid version of optimization context in checkpoint file')

        self.past = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_m = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.nx = struct.unpack('N', bytes(data[offset:offset + 8]))[0]; offset += 8
        self.iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.just_initialized = bool(struct.unpack('<i', bytes(data[offset:offset + 4]))[0]); offset += 4

        self.adam_m = Tensor('f', [self.nx])
        self.adam_v = Tensor('f', [self.nx])
        self.adam_pf = Tensor('f', [self.past] if self.past > 0 else [])

        self.lbfgs_x = Tensor('f', [self.nx])
        self.lbfgs_xp = Tensor('f', [self.nx])
        self.lbfgs_g = Tensor('f', [self.nx])
        self.lbfgs_gp = Tensor('f', [self.nx])
        self.lbfgs_d = Tensor('f', [self.nx])
        self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else [])
        self.lbfgs_lmal = Tensor('f', [self.lbfgs_m])
        self.lbfgs_lmys = Tensor('f', [self.lbfgs_m])
        self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m])
        self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m])

        # forgot to save type in version 1:
        # guess self.type from number of remaining bytes
        size_type_0 = 12 + sum([t.max_storage_size() for t in
                                [self.adam_m, self.adam_v]
                                +([self.adam_pf] if (self.past > 0) else [])])
        size_type_1 = 24 + sum([t.max_storage_size() for t in
                                [self.lbfgs_x, self.lbfgs_xp, self.lbfgs_g,
                                 self.lbfgs_gp, self.lbfgs_d, self.lbfgs_pf,
                                 self.lbfgs_lmal, self.lbfgs_lmys,
                                 self.lbfgs_lms, self.lbfgs_lmy]
                                +([self.lbfgs_pf] if (self.past > 0) else [])])
        # due to alignment padding the size might not by exact
        # but the difference in size for both types is significant,
        # so we can just use whichever is closest
        remaining = len(data) - offset
        if abs(remaining - size_type_0) < abs(remaining - size_type_1):
            self.type = 0
        else:
            self.type = 1

        if self.type == 0:
            offset = self.adam_m.load(data, offset)
            offset = self.adam_v.load(data, offset)
            offset = self.adam_pf.load(data,offset)

            self.adam_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.adam_fx_prev = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.adam_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

        elif self.type == 1:
            offset = self.lbfgs_x.load(data, offset)
            offset = self.lbfgs_xp.load(data, offset)
            offset = self.lbfgs_g.load(data, offset)
            offset = self.lbfgs_gp.load(data, offset)
            offset = self.lbfgs_d.load(data, offset)
            offset = self.lbfgs_pf.load(data, offset)
            offset = self.lbfgs_lmal.load(data, offset)
            offset = self.lbfgs_lmys.load(data, offset)
            offset = self.lbfgs_lms.load(data, offset)
            offset = self.lbfgs_lmy.load(data, offset)

            self.lbfgs_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_j = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_k = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_end = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

        else:
            raise ValueError(f"Invalid optimizer type '{self.type}'")

        return offset

    def save_gguf(self, gguf_writer):
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_FILE_VERSION, 0)
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT, self.past)
        gguf_writer.add_uint64(LLM_KV_OPTIMIZER_PARAMETER_COUNT, self.nx)
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ITERATION_COUNT, self.iter)
        gguf_writer.add_bool(LLM_KV_OPTIMIZER_JUST_INITIALIZED, self.just_initialized)

        if self.type == 0:
            gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_ADAM)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_BEST_LOSS, self.adam_fx_best)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS, self.adam_fx_prev)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT, self.adam_n_no_improvement)

            self.adam_m.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS)
            self.adam_v.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS)
            if self.past > 0:
                self.adam_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES)

        elif self.type == 1:
            gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_LBFGS)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT, self.lbfgs_m)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS, self.lbfgs_fx_best)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP, self.lbfgs_step)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J, self.lbfgs_j)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K, self.lbfgs_k)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END, self.lbfgs_end)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT, self.lbfgs_n_no_improvement)

            self.lbfgs_x.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS)
            self.lbfgs_xp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS)
            self.lbfgs_g.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS)
            self.lbfgs_gp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS)
            self.lbfgs_d.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION)
            if self.past > 0:
                self.lbfgs_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES)
            self.lbfgs_lmal.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA)
            self.lbfgs_lmys.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS)
            self.lbfgs_lms.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S)
            self.lbfgs_lmy.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y)
        else:
            raise ValueError('Unknown optimizer type')

class LoraParams:
    def __init__(self):
        pass

    def load(self, data, offset):
        self.n_rank_attention_norm = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_wq = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_wk = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_wv = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_wo = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_ffn_norm = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_w1 = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_w2 = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_w3 = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_tok_embeddings = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_norm = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rank_output = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        return offset

    def save_gguf(self, gguf_writer):
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_TOKEN_EMBD, self.n_rank_tok_embeddings)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_OUTPUT_NORM, self.n_rank_norm)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_OUTPUT, self.n_rank_output)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_ATTN_NORM, self.n_rank_attention_norm)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_ATTN_Q, self.n_rank_wq)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_ATTN_K, self.n_rank_wk)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_ATTN_V, self.n_rank_wv)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_ATTN_OUT, self.n_rank_wo)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_FFN_NORM, self.n_rank_ffn_norm)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_FFN_GATE, self.n_rank_w1)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_FFN_DOWN, self.n_rank_w2)
        gguf_writer.add_uint32(LLM_KV_TRAINING_LORA_RANK_FFN_UP, self.n_rank_w3)

class ModelParams:
    def __init__(self, n_ff = None):
        self.n_ff = n_ff

    def load(self, data, offset):
        self.n_vocab = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_embd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_mult = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_head = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_layer = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rot = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        return offset

    def get_n_ff(self):
        if self.n_ff is None:
            # struct my_llama_model::get_n_ff in train-text-from-scratch.cpp commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9
            return ((2*(4*self.n_embd)//3 + self.n_mult - 1)//self.n_mult)*self.n_mult
        else:
            return self.n_ff

    def save_gguf(self, gguf_writer):
        # self.n_vocab not saved
        gguf_writer.add_embedding_length(self.n_embd)
        gguf_writer.add_head_count(self.n_head)
        gguf_writer.add_block_count(self.n_layer)
        gguf_writer.add_rope_dimension_count(self.n_rot)
        gguf_writer.add_feed_forward_length(self.get_n_ff())

def tensor_name(key, bid=None, suffix=".weight"):
    return gguf.TENSOR_NAMES[key].format(bid=bid) + suffix

class Layer:
    def __init__(self, params, lora_params, bid):
        self.bid = bid
        self.att_norm_a = Tensor('f', [lora_params.n_rank_attention_norm, params.n_embd])
        self.att_norm_b = Tensor('f', [lora_params.n_rank_attention_norm, 1])
        self.wq_a = Tensor('f', [lora_params.n_rank_wq, params.n_embd])
        self.wq_b = Tensor('f', [lora_params.n_rank_wq, params.n_embd])
        self.wk_a = Tensor('f', [lora_params.n_rank_wk, params.n_embd])
        self.wk_b = Tensor('f', [lora_params.n_rank_wk, params.n_embd])
        self.wv_a = Tensor('f', [lora_params.n_rank_wv, params.n_embd])
        self.wv_b = Tensor('f', [lora_params.n_rank_wv, params.n_embd])
        self.wo_a = Tensor('f', [lora_params.n_rank_wo, params.n_embd])
        self.wo_b = Tensor('f', [lora_params.n_rank_wo, params.n_embd])
        self.ffn_norm_a = Tensor('f', [lora_params.n_rank_ffn_norm, params.n_embd])
        self.ffn_norm_b = Tensor('f', [lora_params.n_rank_ffn_norm, 1])
        self.w1_a = Tensor('f', [lora_params.n_rank_w1, params.n_embd])
        self.w1_b = Tensor('f', [lora_params.n_rank_w1, params.get_n_ff()])
        self.w2_a = Tensor('f', [lora_params.n_rank_w2, params.get_n_ff()])
        self.w2_b = Tensor('f', [lora_params.n_rank_w2, params.n_embd])
        self.w3_a = Tensor('f', [lora_params.n_rank_w3, params.n_embd])
        self.w3_b = Tensor('f', [lora_params.n_rank_w3, params.get_n_ff()])

    def load(self, data, offset):
        offset = self.att_norm_a.load(data, offset)
        offset = self.att_norm_b.load(data, offset)
        offset = self.wq_a.load(data, offset)
        offset = self.wq_b.load(data, offset)
        offset = self.wk_a.load(data, offset)
        offset = self.wk_b.load(data, offset)
        offset = self.wv_a.load(data, offset)
        offset = self.wv_b.load(data, offset)
        offset = self.wo_a.load(data, offset)
        offset = self.wo_b.load(data, offset)
        offset = self.ffn_norm_a.load(data, offset)
        offset = self.ffn_norm_b.load(data, offset)
        offset = self.w1_a.load(data, offset)
        offset = self.w1_b.load(data, offset)
        offset = self.w2_a.load(data, offset)
        offset = self.w2_b.load(data, offset)
        offset = self.w3_a.load(data, offset)
        offset = self.w3_b.load(data, offset)
        return offset

    def save_gguf(self, gguf_writer):
        self.att_norm_a.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_NORM, self.bid, ".weight.lora_a"))
        self.att_norm_b.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_NORM, self.bid, ".weight.lora_b"))
        self.wq_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_Q, self.bid, ".weight.lora_a"))
        self.wq_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_Q, self.bid, ".weight.lora_b"))
        self.wk_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_K, self.bid, ".weight.lora_a"))
        self.wk_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_K, self.bid, ".weight.lora_b"))
        self.wv_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_V, self.bid, ".weight.lora_a"))
        self.wv_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_V, self.bid, ".weight.lora_b"))
        self.wo_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, self.bid, ".weight.lora_a"))
        self.wo_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, self.bid, ".weight.lora_b"))
        self.ffn_norm_a.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_NORM, self.bid, ".weight.lora_a"))
        self.ffn_norm_b.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_NORM, self.bid, ".weight.lora_b"))
        self.w1_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_GATE, self.bid, ".weight.lora_a"))
        self.w1_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_GATE, self.bid, ".weight.lora_b"))
        self.w2_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, self.bid, ".weight.lora_a"))
        self.w2_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, self.bid, ".weight.lora_b"))
        self.w3_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_UP, self.bid, ".weight.lora_a"))
        self.w3_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_UP, self.bid, ".weight.lora_b"))

class LoraModel:
    def __init__(self, n_ff = None):
        self.params = ModelParams(n_ff = n_ff)
        self.lora_params = LoraParams()
        self.layers = []

    def load(self, data, offset):
        offset = self.params.load(data, offset)
        offset = self.lora_params.load(data, offset)

        self.tok_embd_a = Tensor('f', [self.lora_params.n_rank_tok_embeddings, self.params.n_embd])
        self.tok_embd_b = Tensor('f', [self.lora_params.n_rank_tok_embeddings, self.params.n_vocab])
        self.norm_a = Tensor('f', [self.lora_params.n_rank_norm, self.params.n_embd])
        self.norm_b = Tensor('f', [self.lora_params.n_rank_norm, 1])
        self.output_a = Tensor('f', [self.lora_params.n_rank_output, self.params.n_embd])
        self.output_b = Tensor('f', [self.lora_params.n_rank_output, self.params.n_vocab])

        offset = self.tok_embd_a.load(data, offset)
        offset = self.tok_embd_b.load(data, offset)
        offset = self.norm_a.load(data, offset)
        offset = self.norm_b.load(data, offset)
        offset = self.output_a.load(data, offset)
        offset = self.output_b.load(data, offset)

        self.layers.clear()
        for bid in range(self.params.n_layer):
            layer = Layer(self.params, self.lora_params, bid)
            offset = layer.load(data, offset)
            self.layers.append(layer)

        return offset

    def save_gguf(self, gguf_writer):
        self.params.save_gguf(gguf_writer)
        self.lora_params.save_gguf(gguf_writer)

        self.tok_embd_a.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD, suffix=".weight.lora_a"))
        self.tok_embd_b.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD, suffix=".weight.lora_b"))
        self.norm_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT_NORM, suffix=".weight.lora_a"))
        self.norm_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT_NORM, suffix=".weight.lora_b"))
        self.output_a.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT, suffix=".weight.lora_a"))
        self.output_b.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT, suffix=".weight.lora_b"))

        for layer in self.layers:
            layer.save_gguf(gguf_writer)

class LoraCheckpoint:
    def __init__(self, n_ff = None):
        self.model = LoraModel(n_ff = n_ff)
        self.opt_ctx = OptimizationContext()

    def load(self, data, offset):
        magic = bytes(reversed(data[offset:offset + 4])); offset += 4
        if magic != b'ggcl':
            raise ValueError(f"File header magic indicates, that this is no finetune-lora checkpoint file. Expected 'ggcl', Got '{str(magic)}'")

        self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        if self.version != 0:
            raise ValueError('Invalid version of checkpoint file')

        self.train_its = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.train_samples = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.train_tokens = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4

        offset = self.model.load(data, offset)
        offset = self.opt_ctx.load(data, offset)

        return offset

    def save_gguf(self, gguf_writer):
        gguf_writer.add_file_type(gguf.GGMLQuantizationType.F32)
        gguf_writer.add_layer_norm_rms_eps(1e-5)
        gguf_writer.add_uint32(LLM_KV_TRAINING_FILE_VERSION, 0)
        gguf_writer.add_string(LLM_KV_TRAINING_TYPE, LLM_KV_TRAINING_TYPE_FINETUNE_LORA)
        gguf_writer.add_uint32(LLM_KV_TRAINING_ITERATION_COUNT, self.train_its)
        gguf_writer.add_uint32(LLM_KV_TRAINING_SAMPLE_COUNT, self.train_samples)
        gguf_writer.add_uint32(LLM_KV_TRAINING_TOKEN_COUNT, self.train_tokens)
        self.model.save_gguf(gguf_writer)
        self.opt_ctx.save_gguf(gguf_writer)

def handle_args():
    parser = argparse.ArgumentParser(description = 'Convert finetune checkpoints to GGUF')
    parser.add_argument('--input', '-i', type = Path, help = 'Input finetune checkpoint filename', required=True)
    parser.add_argument('--output', '-o', type = Path, help = 'Output GGUF filename', required=True)
    parser.add_argument('--ff', type = int, help = "Feedforward size, if not provided compute from n_mult. Provide this if you get 'ValueError: Tensor.load: Expected number of elements does not match what is read from file'", required=False)
    return parser.parse_args()

def main():
    cfg = handle_args()
    print(cfg)
    data = np.memmap(cfg.input, mode = 'r')
    chk = LoraCheckpoint(n_ff = cfg.ff)
    offset = 0
    offset = chk.load(data, offset)
    # we should have read all available data
    assert(offset == len(data))

    gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
    chk.save_gguf(gguf_writer)
    print(" gguf: write header")
    gguf_writer.write_header_to_file()
    print(" gguf: write metadata")
    gguf_writer.write_kv_data_to_file()
    print(" gguf: write tensors")
    gguf_writer.write_tensors_to_file()
    gguf_writer.close()

if __name__ == '__main__':
    main()
(File diff suppressed because it is too large.)
@@ -1,34 +0,0 @@
#!/bin/bash
cd `dirname $0`
cd ../..

EXE="./llama-finetune"

if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi
if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi

# MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2-q8_0.gguf" # This is the model the readme uses.
MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note in this case with "-g", you get an f32-format .BIN file that isn't yet supported if you use it with "llama-cli --lora" with GPU inferencing.

while getopts "dg" opt; do
  case $opt in
    d)
      DEBUGGER="gdb --args"
      ;;
    g)
      EXE="./build/bin/Release/finetune"
      GPUARG="--gpu-layers 25"
      ;;
  esac
done

$DEBUGGER $EXE \
    --model-base $MODEL \
    $GPUARG \
    --checkpoint-in chk-ol3b-shakespeare-LATEST.gguf \
    --checkpoint-out chk-ol3b-shakespeare-ITERATION.gguf \
    --lora-out lora-ol3b-shakespeare-ITERATION.bin \
    --train-data "$LLAMA_TRAINING_DIR\shakespeare.txt" \
    --save-every 10 \
    --threads 10 --adam-iter 30 --batch 4 --ctx 64 \
    --use-checkpointing
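The comment in the deleted script above refers to loading the resulting adapter with `llama-cli --lora`. A minimal sketch of that follow-up step, assuming illustrative filenames and that the `--lora` flag behaves as the comment implies for the llama.cpp version of this commit:

```bash
# sketch only: model and adapter filenames are illustrative; --lora usage is assumed from the comment above
./llama-cli -m models/openllama-3b-v2.gguf \
    --lora lora-ol3b-shakespeare-LATEST.bin \
    -p "To be, or not to be"
```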
@@ -1,6 +1,6 @@
 # llama.cpp/examples/imatrix
 
-Compute an importance matrix for a model and given text dataset. Can be used during quantization to enchance the quality of the quantum models.
+Compute an importance matrix for a model and given text dataset. Can be used during quantization to enchance the quality of the quantized models.
 More information is available here: https://github.com/ggerganov/llama.cpp/pull/4861
 
 ## Usage
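The usage section of that README is truncated in this diff. As a rough sketch of the workflow it documents, computing an importance matrix and then feeding it to quantization looks approximately like this; filenames are illustrative and the exact flags should be checked against the full README rather than taken from here:

```bash
# sketch only: filenames are illustrative; verify flags against the full imatrix README
./llama-imatrix -m ggml-model-f16.gguf -f calibration-data.txt -o imatrix.dat
./llama-quantize --imatrix imatrix.dat ggml-model-f16.gguf ggml-model-q4_k_m.gguf q4_k_m
```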
@@ -124,6 +124,7 @@ static std::string chat_add_and_format(struct llama_model * model, std::vector<l
     auto formatted = llama_chat_format_single(
         model, g_params->chat_template, chat_msgs, new_msg, role == "user");
     chat_msgs.push_back({role, content});
+    LOG("formatted: %s\n", formatted.c_str());
     return formatted;
 }
 
@@ -5,7 +5,7 @@ Fast, lightweight, pure C/C++ HTTP server based on [httplib](https://github.com/
 Set of LLM REST APIs and a simple web front end to interact with llama.cpp.
 
 **Features:**
-* LLM inference of F16 and quantum models on GPU and CPU
+* LLM inference of F16 and quantized models on GPU and CPU
 * [OpenAI API](https://github.com/openai/openai-openapi) compatible chat completions and embeddings routes
 * Parallel decoding with multi-user support
 * Continuous batching
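The OpenAI-compatible route mentioned in the feature list above can be exercised with a plain HTTP request once `llama-server` is running. A minimal sketch, where the host, port and payload are illustrative assumptions:

```bash
# sketch only: assumes a locally running llama-server; host, port and payload are illustrative
curl http://localhost:8080/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{
          "messages": [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user",   "content": "Hello!"}
          ]
        }'
```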
@@ -1,5 +0,0 @@
set(TARGET llama-train-text-from-scratch)
add_executable(${TARGET} train-text-from-scratch.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
@@ -1,27 +0,0 @@
# train-text-from-scratch

Basic usage instructions:

```bash
# get training data
wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt

# train
./bin/llama-train-text-from-scratch \
        --vocab-model ../models/ggml-vocab-llama.gguf \
        --ctx 64 --embd 256 --head 8 --layer 16 \
        --checkpoint-in chk-shakespeare-256x16-LATEST.gguf \
        --checkpoint-out chk-shakespeare-256x16-ITERATION.gguf \
        --model-out ggml-shakespeare-256x16-f32-ITERATION.gguf \
        --train-data "shakespeare.txt" \
        -t 6 -b 16 --seed 1 --adam-iter 256 \
        --no-checkpointing

# predict
./bin/llama-cli -m ggml-shakespeare-256x16-f32.gguf
```

Output files will be saved every N iterations (config with `--save-every N`).
The pattern "ITERATION" in the output filenames will be replaced with the iteration number and "LATEST" for the latest output.

To train GGUF models just pass them to `--checkpoint-in FN`.
@@ -1,499 +0,0 @@
#!/usr/bin/env python3
# train-text-from-scratch checkpoint --> gguf conversion

import argparse
import os
import struct
import sys
import numpy as np
from pathlib import Path

if 'NO_LOCAL_GGUF' not in os.environ:
    sys.path.insert(1, str(Path(__file__).parent / '..' / '..' / 'gguf-py'))
import gguf

# gguf constants
LLM_KV_OPTIMIZER_TYPE = "optimizer.type"
LLM_KV_OPTIMIZER_TYPE_ADAM = "adam"
LLM_KV_OPTIMIZER_TYPE_LBFGS = "lbfgs"
LLM_KV_OPTIMIZER_FILE_VERSION = "optimizer.file_version"
LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT = "optimizer.convergence_past_count"
LLM_KV_OPTIMIZER_PARAMETER_COUNT = "optimizer.parameter_count"
LLM_KV_OPTIMIZER_ITERATION_COUNT = "optimizer.iteration_count"
LLM_KV_OPTIMIZER_JUST_INITIALIZED = "optimizer.just_initialized"
LLM_KV_OPTIMIZER_ADAM_BEST_LOSS = "optimizer.adam.best_loss"
LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS = "optimizer.adam.previous_loss"
LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT = "optimizer.adam.no_improvement_count"
LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT = "optimizer.lbfgs.approx_hessian_count"
LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS = "optimizer.lbfgs.best_loss"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP = "optimizer.lbfgs.line_search_step"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J = "optimizer.lbfgs.line_search_j"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K = "optimizer.lbfgs.line_search_k"
LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END = "optimizer.lbfgs.line_search_end"
LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT = "optimizer.lbfgs.no_improvement_count"

LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS = "optimizer.adam.first_moments"
LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS = "optimizer.adam.second_moments"
LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES = "optimizer.adam.past_loss_values"

LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS = "optimizer.lbfgs.current_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS = "optimizer.lbfgs.previous_parameters"
LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS = "optimizer.lbfgs.current_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS = "optimizer.lbfgs.previous_gradients"
LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION = "optimizer.lbfgs.search_direction"
LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES = "optimizer.lbfgs.past_loss_values"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA = "optimizer.lbfgs.memory_alpha"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS = "optimizer.lbfgs.memory_ys"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S = "optimizer.lbfgs.memory_s"
LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y = "optimizer.lbfgs.memory_y"

LLM_KV_TRAINING_TYPE_TRAIN_MODEL = "train_model"
LLM_KV_TRAINING_TYPE_FINETUNE_LORA = "finetune_lora"
LLM_KV_TRAINING_TYPE = "training.type"
LLM_KV_TRAINING_FILE_VERSION = "training.file_version"
LLM_KV_TRAINING_ITERATION_COUNT = "training.iteration_count"
LLM_KV_TRAINING_SAMPLE_COUNT = "training.sample_count"
LLM_KV_TRAINING_TOKEN_COUNT = "training.token_count"

class Tensor:
    def __init__(self, dtype='f', ne=None):
        if ne is None:
            ne = []
        self.dtype = dtype
        self.ne = ne
        self.nbytes = 0
        if self.dtype == 'f':
            if len(self.ne) == 0:
                self.nbytes = 0
            else:
                self.nbytes = int(np.prod(self.ne)) * 4
        else:
            raise ValueError(f"Unhandled data type '{self.dtype}'")

    def load(self, data, offset):
        nd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        namelen = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        dtype = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4

        assert(nd == len(self.ne))
        ne = []
        for d in range(nd):
            n = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
            ne.append(n)

        assert(tuple(ne) == tuple(self.ne))

        if self.dtype == 'f':
            assert(dtype == 0)
        else:
            raise ValueError(f"Unhandled data type '{self.dtype}'")

        self.name = bytes(data[offset:offset+namelen]); offset += namelen
        # 32-byte alignment
        offset += (0 - offset) & 31
        self.data = data[offset:offset+self.nbytes]
        offset += self.nbytes
        return offset

    def max_storage_size(self):
        result = 0
        result += 4 # nd
        result += 4 # namelen
        result += 4 # dtype
        result += len(self.ne)*8 # ne
        result += 48 # name (maximum as of commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9)
        result += 31 # 32-byte alignment
        result += self.nbytes
        return result

    def save_gguf(self, gguf_writer, name):
        gguf_writer.add_tensor(
            name=name,
            tensor=self.data,
            raw_shape=np.array(list(reversed(self.ne))),
            raw_dtype=gguf.GGMLQuantizationType.F32)

class OptimizationParamsV0:
    def __init__(self):
        pass

    def load(self, data, offset):
        self.type = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_threads = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.past = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.delta = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.print_forward_graph = struct.unpack('<?', bytes(data[offset:offset + 1]))[0]; offset += 4 # 32bit-aligned
        self.print_backward_graph = struct.unpack('<?', bytes(data[offset:offset + 1]))[0]; offset += 4 # 32bit-aligned
        self.adam_n_iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_sched = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_decay = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_alpha = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_beta1 = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_beta2 = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_eps = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_eps_f = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.adam_eps_g = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_m = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_n_iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_max_linesearch = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_eps = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_ftol = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_wolfe = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_min_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_max_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.lbfgs_linesearch = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        return offset

class OptimizationContext:
    def __init__(self):
        pass

    def load(self, data, offset):
        self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]
        offset += 4

        if self.version == 0:
            params = OptimizationParamsV0()
            offset = params.load(data, offset)
            self.past = params.past
            self.lbfgs_m = params.lbfgs_m
            self.nx = struct.unpack('N', bytes(data[offset:offset + 8]))[0]; offset += 8
            self.iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.just_initialized = bool(struct.unpack('<i', bytes(data[offset:offset + 4]))[0]); offset += 4
            self.type = params.type

            self.adam_m = Tensor('f', [self.nx])
            self.adam_v = Tensor('f', [self.nx])
            self.adam_pf = Tensor('f', [self.past] if self.past > 0 else [])

            self.lbfgs_x = Tensor('f', [self.nx])
            self.lbfgs_xp = Tensor('f', [self.nx])
            self.lbfgs_g = Tensor('f', [self.nx])
            self.lbfgs_gp = Tensor('f', [self.nx])
            self.lbfgs_d = Tensor('f', [self.nx])
            self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else [])
            self.lbfgs_lmal = Tensor('f', [self.lbfgs_m])
            self.lbfgs_lmys = Tensor('f', [self.lbfgs_m])
            self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m])
            self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m])

            if self.type == 0:
                # these tensors are stored, but we don't need their data
                x = Tensor('f', [self.nx])
                g = Tensor('f', [self.nx])
                g2 = Tensor('f', [self.nx])
                mh = Tensor('f', [self.nx])
                vh = Tensor('f', [self.nx])

                offset = x.load(data, offset)
                offset = g.load(data, offset)
                offset = g2.load(data, offset)
                offset = self.adam_m.load(data, offset)
                offset = self.adam_v.load(data, offset)
                offset = mh.load(data, offset)
                offset = vh.load(data, offset)
                offset = self.adam_pf.load(data, offset)

                self.adam_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.adam_fx_prev = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.adam_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

            elif self.type == 1:
                offset = self.lbfgs_x.load(data, offset)
                offset = self.lbfgs_xp.load(data, offset)
                offset = self.lbfgs_g.load(data, offset)
                offset = self.lbfgs_gp.load(data, offset)
                offset = self.lbfgs_d.load(data, offset)
                offset = self.lbfgs_pf.load(data, offset)
                offset = self.lbfgs_lmal.load(data, offset)
                offset = self.lbfgs_lmys.load(data, offset)
                offset = self.lbfgs_lms.load(data, offset)
                offset = self.lbfgs_lmy.load(data, offset)

                self.lbfgs_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_j = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_k = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_end = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

            else:
                raise ValueError('Unknown optimizer type')

        elif self.version == 1:
            self.past = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.lbfgs_m = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.nx = struct.unpack('N', bytes(data[offset:offset + 8]))[0]; offset += 8
            self.iter = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
            self.just_initialized = bool(struct.unpack('<i', bytes(data[offset:offset + 4]))[0]); offset += 4

            self.adam_m = Tensor('f', [self.nx])
            self.adam_v = Tensor('f', [self.nx])
            self.adam_pf = Tensor('f', [self.past] if self.past > 0 else [])

            self.lbfgs_x = Tensor('f', [self.nx])
            self.lbfgs_xp = Tensor('f', [self.nx])
            self.lbfgs_g = Tensor('f', [self.nx])
            self.lbfgs_gp = Tensor('f', [self.nx])
            self.lbfgs_d = Tensor('f', [self.nx])
            self.lbfgs_pf = Tensor('f', [self.past] if self.past > 0 else [])
            self.lbfgs_lmal = Tensor('f', [self.lbfgs_m])
            self.lbfgs_lmys = Tensor('f', [self.lbfgs_m])
            self.lbfgs_lms = Tensor('f', [self.nx, self.lbfgs_m])
            self.lbfgs_lmy = Tensor('f', [self.nx, self.lbfgs_m])

            # forgot to save type in version 1:
            # guess self.type from number of remaining bytes
            size_type_0 = 12 + sum([t.max_storage_size() for t in
                                    [self.adam_m, self.adam_v]
                                    +([self.adam_pf] if (self.past > 0) else [])])
            size_type_1 = 24 + sum([t.max_storage_size() for t in
                                    [self.lbfgs_x, self.lbfgs_xp, self.lbfgs_g,
                                     self.lbfgs_gp, self.lbfgs_d, self.lbfgs_pf,
                                     self.lbfgs_lmal, self.lbfgs_lmys,
                                     self.lbfgs_lms, self.lbfgs_lmy]
                                    +([self.lbfgs_pf] if (self.past > 0) else [])])
            # due to alignment padding the size might not by exact
            # but the difference in size for both types is significant,
            # so we can just use whichever is closest
            remaining = len(data) - offset
            if abs(remaining - size_type_0) < abs(remaining - size_type_1):
                self.type = 0
            else:
                self.type = 1

            if self.type == 0:
                offset = self.adam_m.load(data, offset)
                offset = self.adam_v.load(data, offset)
                offset = self.adam_pf.load(data,offset)

                self.adam_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.adam_fx_prev = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.adam_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

            elif self.type == 1:
                offset = self.lbfgs_x.load(data, offset)
                offset = self.lbfgs_xp.load(data, offset)
                offset = self.lbfgs_g.load(data, offset)
                offset = self.lbfgs_gp.load(data, offset)
                offset = self.lbfgs_d.load(data, offset)
                offset = self.lbfgs_pf.load(data, offset)
                offset = self.lbfgs_lmal.load(data, offset)
                offset = self.lbfgs_lmys.load(data, offset)
                offset = self.lbfgs_lms.load(data, offset)
                offset = self.lbfgs_lmy.load(data, offset)

                self.lbfgs_fx_best = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_step = struct.unpack('<f', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_j = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_k = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_end = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4
                self.lbfgs_n_no_improvement = struct.unpack('<i', bytes(data[offset:offset + 4]))[0]; offset += 4

        else:
            raise ValueError('Invalid version of checkpoint file')

        return offset

    def save_gguf(self, gguf_writer):
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_FILE_VERSION, 0)
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_CONVERGENCE_PAST_COUNT, self.past)
        gguf_writer.add_uint64(LLM_KV_OPTIMIZER_PARAMETER_COUNT, self.nx)
        gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ITERATION_COUNT, self.iter)
        gguf_writer.add_bool(LLM_KV_OPTIMIZER_JUST_INITIALIZED, self.just_initialized)

        if self.type == 0:
            gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_ADAM)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_BEST_LOSS, self.adam_fx_best)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_ADAM_PREVIOUS_LOSS, self.adam_fx_prev)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_ADAM_NO_IMPROVEMENT_COUNT, self.adam_n_no_improvement)

            self.adam_m.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_FIRST_MOMENTS)
            self.adam_v.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_SECOND_MOMENTS)
            if self.past > 0:
                self.adam_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_ADAM_PAST_LOSS_VALUES)

        elif self.type == 1:
            gguf_writer.add_string(LLM_KV_OPTIMIZER_TYPE, LLM_KV_OPTIMIZER_TYPE_LBFGS)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_APPROX_HESSIAN_COUNT, self.lbfgs_m)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_BEST_LOSS, self.lbfgs_fx_best)
            gguf_writer.add_float32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_STEP, self.lbfgs_step)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_J, self.lbfgs_j)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_K, self.lbfgs_k)
            gguf_writer.add_int32(LLM_KV_OPTIMIZER_LBFGS_LINE_SEARCH_END, self.lbfgs_end)
            gguf_writer.add_uint32(LLM_KV_OPTIMIZER_LBFGS_NO_IMPROVEMENT_COUNT, self.lbfgs_n_no_improvement)

            self.lbfgs_x.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_PARAMETERS)
            self.lbfgs_xp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_PARAMETERS)
            self.lbfgs_g.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_CURRENT_GRADIENTS)
            self.lbfgs_gp.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PREVIOUS_GRADIENTS)
            self.lbfgs_d.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_SEARCH_DIRECTION)
            if self.past > 0:
                self.lbfgs_pf.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_PAST_LOSS_VALUES)
            self.lbfgs_lmal.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_ALPHA)
            self.lbfgs_lmys.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_YS)
            self.lbfgs_lms.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_S)
            self.lbfgs_lmy.save_gguf(gguf_writer, name=LLM_TENSOR_OPTIMIZER_LBFGS_MEMORY_Y)
        else:
            raise ValueError('Unknown optimizer type')

class ModelParams:
    def __init__(self):
        pass

    def load(self, data, offset):
        self.n_vocab = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_embd = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_mult = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_head = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_layer = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.n_rot = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        return offset

    def get_n_ff(self):
        # struct my_llama_model::get_n_ff in train-text-from-scratch.cpp commit 3b5515bbe0e2224425986ba24f1f5d84aa38dce9
        return ((2*(4*self.n_embd)//3 + self.n_mult - 1)//self.n_mult)*self.n_mult

    def save_gguf(self, gguf_writer):
        # self.n_vocab not saved
        gguf_writer.add_embedding_length(self.n_embd)
        gguf_writer.add_head_count(self.n_head)
        gguf_writer.add_block_count(self.n_layer)
        gguf_writer.add_rope_dimension_count(self.n_rot)
        gguf_writer.add_feed_forward_length(self.get_n_ff())

def tensor_name(key, bid=None):
    return gguf.TENSOR_NAMES[key].format(bid=bid) + ".weight"

class Layer:
    def __init__(self, params, bid):
        self.bid = bid
        self.att_norm = Tensor('f', [params.n_embd])
        self.wq = Tensor('f', [params.n_embd, params.n_embd])
        self.wk = Tensor('f', [params.n_embd, params.n_embd])
        self.wv = Tensor('f', [params.n_embd, params.n_embd])
        self.wo = Tensor('f', [params.n_embd, params.n_embd])
        self.ffn_norm = Tensor('f', [params.n_embd])
        self.w1 = Tensor('f', [params.n_embd, params.get_n_ff()])
        self.w2 = Tensor('f', [params.get_n_ff(), params.n_embd])
        self.w3 = Tensor('f', [params.n_embd, params.get_n_ff()])

    def load(self, data, offset):
        offset = self.att_norm.load(data, offset)
        offset = self.wq.load(data, offset)
        offset = self.wk.load(data, offset)
        offset = self.wv.load(data, offset)
        offset = self.wo.load(data, offset)
        offset = self.ffn_norm.load(data, offset)
        offset = self.w1.load(data, offset)
        offset = self.w2.load(data, offset)
        offset = self.w3.load(data, offset)
        return offset

    def save_gguf(self, gguf_writer):
        self.att_norm.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_NORM, self.bid))
        self.wq.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_Q, self.bid))
        self.wk.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_K, self.bid))
        self.wv.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_V, self.bid))
        self.wo.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.ATTN_OUT, self.bid))
        self.ffn_norm.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_NORM, self.bid))
        self.w1.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_GATE, self.bid))
        self.w2.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_DOWN, self.bid))
        self.w3.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.FFN_UP, self.bid))

class Model:
    def __init__(self):
        self.params = ModelParams()
        self.layers = []

    def load(self, data, offset):
        offset = self.params.load(data, offset)

        self.tok_embd = Tensor('f', [self.params.n_embd, self.params.n_vocab])
        self.norm = Tensor('f', [self.params.n_embd])
        self.output = Tensor('f', [self.params.n_embd, self.params.n_vocab])

        offset = self.tok_embd.load(data, offset)
        offset = self.norm.load(data, offset)
        offset = self.output.load(data, offset)

        self.layers.clear()
        for bid in range(self.params.n_layer):
            layer = Layer(self.params, bid)
            offset = layer.load(data, offset)
            self.layers.append(layer)

        return offset

    def save_gguf(self, gguf_writer):
        self.params.save_gguf(gguf_writer)

        self.tok_embd.save_gguf(gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD))
        self.norm.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT_NORM))
        self.output.save_gguf (gguf_writer, name=tensor_name(gguf.MODEL_TENSOR.OUTPUT))

        for layer in self.layers:
            layer.save_gguf(gguf_writer)

class Checkpoint:
    def __init__(self):
        self.model = Model()
        self.opt_ctx = OptimizationContext()

    def load(self, data, offset):
        magic = bytes(reversed(data[offset:offset + 4])); offset += 4
        if magic != b'ggcp':
            raise ValueError(f"File header magic indicates, that this is no checkpoint file. Expected 'ggcp', Got '{str(magic)}'")

        self.version = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        if self.version != 0:
            raise ValueError('Invalid version of checkpoint file')

        self.train_its = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.train_samples = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4
        self.train_tokens = struct.unpack('<I', bytes(data[offset:offset + 4]))[0]; offset += 4

        offset = self.model.load(data, offset)
        offset = self.opt_ctx.load(data, offset)

        return offset

    def save_gguf(self, gguf_writer):
        gguf_writer.add_file_type(gguf.GGMLQuantizationType.F32)
        gguf_writer.add_layer_norm_rms_eps(1e-5)
        gguf_writer.add_uint32(LLM_KV_TRAINING_FILE_VERSION, 0)
        gguf_writer.add_string(LLM_KV_TRAINING_TYPE, LLM_KV_TRAINING_TYPE_TRAIN_MODEL)
        gguf_writer.add_uint32(LLM_KV_TRAINING_ITERATION_COUNT, self.train_its)
        gguf_writer.add_uint32(LLM_KV_TRAINING_SAMPLE_COUNT, self.train_samples)
        gguf_writer.add_uint32(LLM_KV_TRAINING_TOKEN_COUNT, self.train_tokens)
        self.model.save_gguf(gguf_writer)
        self.opt_ctx.save_gguf(gguf_writer)

def handle_args():
    parser = argparse.ArgumentParser(description = 'Convert train-text-from-scratch checkpoints to GGUF')
    parser.add_argument('--input', '-i', type = Path, help = 'Input train checkpoint filename', required=True)
    parser.add_argument('--output', '-o', type = Path, help ='Output GGUF filename', required=True)
    return parser.parse_args()

def main():
    cfg = handle_args()
    data = np.memmap(cfg.input, mode = 'r')
    chk = Checkpoint()
    offset = 0
    offset = chk.load(data, offset)
    # we should have read all available data
    assert(offset == len(data))

    gguf_writer = gguf.GGUFWriter(cfg.output, gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], use_temp_file = False)
    chk.save_gguf(gguf_writer)
    print(" gguf: write header")
    gguf_writer.write_header_to_file()
    print(" gguf: write metadata")
    gguf_writer.write_kv_data_to_file()
    print(" gguf: write tensors")
    gguf_writer.write_tensors_to_file()
    gguf_writer.close()

if __name__ == '__main__':
    main()
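Like the LoRA converter earlier in this diff, the deleted script above only takes `--input` and `--output`. A minimal sketch of its invocation, assuming the usual script name and illustrative checkpoint filenames (neither is taken from this diff):

```bash
# sketch only: script path and checkpoint filenames are assumptions, not from the diff
python3 convert-train-checkpoint-to-gguf.py \
    --input  chk-shakespeare-256x16-LATEST.bin \
    --output chk-shakespeare-256x16-LATEST.gguf
```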
File diff suppressed because it is too large
@@ -2400,6 +2400,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_vsx (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
     GGML_API int ggml_cpu_has_cann (void);
+    GGML_API int ggml_cpu_has_llamafile (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
@@ -510,10 +510,10 @@ if (GGML_SYCL)
         set(GGML_EXTRA_LIBS ${GGML_EXTRA_LIBS} IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL)
     else()
         if (GGML_SYCL_TARGET STREQUAL "INTEL")
-            set(GGML_EXTRA_LIBS ${GGML_EXTRA_LIBS} OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
+            set(GGML_EXTRA_LIBS ${GGML_EXTRA_LIBS} -fsycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
         elseif (GGML_SYCL_TARGET STREQUAL "NVIDIA")
             set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda")
-            set(GGML_EXTRA_LIBS ${GGML_EXTRA_LIBS} pthread m dl onemkl)
+            set(GGML_EXTRA_LIBS ${GGML_EXTRA_LIBS} -fsycl pthread m dl onemkl)
         endif()
     endif()
 endif()
@@ -267,7 +267,7 @@ struct ggml_backend_sycl_context {
 
     queue_ptr stream(int device, int stream) {
         if (qptrs[device][stream] == nullptr) {
-            qptrs[device][stream] = &(dpct::get_current_device().default_queue());
+            qptrs[device][stream] = &(dpct::get_device(device).default_queue());
         }
         return qptrs[device][stream];
     }
@@ -588,7 +588,7 @@ namespace dpct
             out = prop;
         }
 
         /// dpct device extension
         class device_ext : public sycl::device {
             typedef std::mutex mutex_type;
 
@@ -697,7 +697,7 @@ namespace dpct
             std::unique_lock<mutex_type> lock(m_mutex);
             lock.unlock();
             for (auto &q : _queues) {
                 q.wait_and_throw();
             }
             // Guard the destruct of current_queues to make sure the ref count is
             // safe.
@@ -734,7 +734,12 @@ namespace dpct
 
         void destroy_queue(sycl::queue queue) {
             std::lock_guard<mutex_type> lock(m_mutex);
-            _queues.clear();
+            _queues.erase(std::remove_if(_queues.begin(), _queues.end(),
+                                         [=](const sycl::queue &q) -> bool
+                                         {
+                                             return q == queue;
+                                         }),
+                          _queues.end());
         }
         void set_saved_queue(sycl::queue q) {
             std::lock_guard<mutex_type> lock(m_mutex);
@@ -764,13 +769,13 @@ namespace dpct
             if (enable_exception_handler) {
                 eh = exception_handler;
             }
-            auto q = sycl::queue(*this, eh,
-                                 sycl::property_list(
+            _queues.push_back(sycl::queue(
+                *this, eh,
+                sycl::property_list(
 #ifdef DPCT_PROFILING_ENABLED
                 sycl::property::queue::enable_profiling(),
 #endif
-                properties...));
-            _queues.push_back(q);
+                properties...)));
 
             return _queues.back();
         }
@@ -783,8 +788,8 @@ namespace dpct
             if (enable_exception_handler) {
                 eh = exception_handler;
             }
-            _queues.push_back(
-                sycl::queue(device, eh,
+            _queues.push_back(sycl::queue(
+                device, eh,
                 sycl::property_list(
 #ifdef DPCT_PROFILING_ENABLED
                     sycl::property::queue::enable_profiling(),
@@ -855,15 +860,75 @@ namespace dpct
         unsigned int get_device_id(const sycl::device &dev)
         {
             unsigned int id = 0;
-            for (auto dev_item : _devs)
+            for (auto &dev_item : _devs)
             {
                 if (*dev_item == dev)
                 {
-                    break;
+                    return id;
                 }
                 id++;
             }
-            return id;
+            return -1;
+        }
+
+        inline std::string get_preferred_gpu_platform_name() {
+            std::string result;
+
+            std::string filter = "level-zero";
+            char* env = getenv("ONEAPI_DEVICE_SELECTOR");
+            if (env) {
+                if (std::strstr(env, "level_zero")) {
+                    filter = "level-zero";
+                }
+                else if (std::strstr(env, "opencl")) {
+                    filter = "opencl";
+                }
+                else if (std::strstr(env, "cuda")) {
+                    filter = "cuda";
+                }
+                else if (std::strstr(env, "hip")) {
+                    filter = "hip";
+                }
+                else {
+                    throw std::runtime_error("invalid device filter: " + std::string(env));
+                }
+            }
+
+            auto plaform_list = sycl::platform::get_platforms();
+
+            for (const auto& platform : plaform_list) {
+                auto devices = platform.get_devices();
+                auto gpu_dev = std::find_if(devices.begin(), devices.end(), [](const sycl::device& d) {
+                    return d.is_gpu();
+                });
+
+                if (gpu_dev == devices.end()) {
+                    // cout << "platform [" << platform_name
+                    // << "] does not contain GPU devices, skipping\n";
+                    continue;
+                }
+
+                auto platform_name = platform.get_info<sycl::info::platform::name>();
+                std::string platform_name_low_case;
+                platform_name_low_case.resize(platform_name.size());
+
+                std::transform(
+                    platform_name.begin(), platform_name.end(), platform_name_low_case.begin(), ::tolower);
+
+                if (platform_name_low_case.find(filter) == std::string::npos) {
+                    // cout << "platform [" << platform_name
+                    // << "] does not match with requested "
+                    // << filter << ", skipping\n";
+                    continue;
+                }
+
+                result = platform_name;
+            }
+
+            if (result.empty())
+                throw std::runtime_error("can not find preferred GPU platform");
+
+            return result;
         }
 
         template <class DeviceSelector>
@@ -930,10 +995,15 @@ namespace dpct
         // Keep track of the number of devices per backend
         std::map<sycl::backend, size_t> DeviceNums;
         std::map<std::string, std::vector<sycl::device>> backend_devices;
+        auto preferred_platform_name = get_preferred_gpu_platform_name();
+
        while (!Platforms.empty()) {
            auto Platform = Platforms.back();
            Platforms.pop_back();
+           auto platform_name = Platform.get_info<sycl::info::platform::name>();
+           if (platform_name.compare(preferred_platform_name) != 0) {
+               continue;
+           }
            auto devices = Platform.get_devices();
            std::string backend_type = get_device_backend_and_type(devices[0]);
            for (const auto &device : devices) {
@@ -1989,6 +2059,11 @@ namespace dpct
         return dev_mgr::instance().current_device();
     }
 
+    static inline device_ext &get_device(unsigned int id)
+    {
+        return dev_mgr::instance().get_device(id);
+    }
+
     static inline sycl::queue &get_in_order_queue()
     {
         return dev_mgr::instance().current_device().in_order_queue();
@@ -22005,6 +22005,14 @@ int ggml_cpu_has_cann(void) {
 #endif
 }
 
+int ggml_cpu_has_llamafile(void) {
+#if defined(GGML_USE_LLAMAFILE)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 int ggml_cpu_has_gpublas(void) {
     return ggml_cpu_has_cuda() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() || ggml_cpu_has_sycl();
 }
@@ -529,12 +529,16 @@ extern "C" {
             struct llama_lora_adapter * adapter,
             float scale);
 
-    // Remove a LoRA adapter from given context
+    // Remove a specific LoRA adapter from given context
     // Return -1 if the adapter is not present in the context
     LLAMA_API int32_t llama_lora_adapter_remove(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter);
 
+    // Remove all LoRA adapters from given context
+    LLAMA_API void llama_lora_adapter_clear(
+            struct llama_context * ctx);
+
     // Manually free a LoRA adapter
     // Note: loaded adapters will be free when the associated model is deleted
     LLAMA_API void llama_lora_adapter_free(struct llama_lora_adapter * adapter);
@@ -4889,6 +4889,7 @@ static void llm_load_hparams(
             } break;
         case LLM_ARCH_PHI3:
             {
+                ml.get_key(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
                 ml.get_key(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
 
                 switch (hparams.n_layer) {
@@ -10748,7 +10749,7 @@ struct llm_build_context {
         struct ggml_tensor * inp_pos = build_inp_pos();
 
         // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
-        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
+        struct ggml_tensor * KQ_mask_swa = build_inp_KQ_mask_swa();
 
         for (int il = 0; il < n_layer; ++il) {
             auto residual = inpL;
@@ -10806,7 +10807,7 @@ struct llm_build_context {
 
                 cur = llm_build_kv(ctx0, lctx, kv_self, gf,
                         model.layers[il].wo, model.layers[il].bo,
-                        Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                        Kcur, Vcur, Qcur, KQ_mask_swa, n_tokens, kv_head, n_kv, 1.0f, cb, il);
             }
 
             if (il == n_layer - 1) {
@@ -14013,18 +14014,23 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
             "causal attention is not supported by this model"
         );
 
-    if (lctx.inp_KQ_mask) {
+    if (lctx.inp_KQ_mask || lctx.inp_KQ_mask_swa) {
         // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
         if (cparams.causal_attn && !lctx.is_encoding) {
             const int64_t n_kv = kv_self.n;
             const int64_t n_tokens = batch.n_tokens;
 
-            GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
-
-            float * data = (float *) lctx.inp_KQ_mask->data;
+            float * data = nullptr;
             float * data_swa = nullptr;
 
+            if (lctx.inp_KQ_mask) {
+                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
+                data = (float *) lctx.inp_KQ_mask->data;
+            }
+
             if (lctx.inp_KQ_mask_swa) {
+                GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask_swa->buffer));
                 data_swa = (float *) lctx.inp_KQ_mask_swa->data;
             }
 
@@ -14047,7 +14053,10 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                             f = 0.0f;
                         }
                     }
-                    data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+
+                    if (data) {
+                        data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
+                    }
 
                     // may need to cut off old tokens for sliding window
                     if (data_swa) {
@@ -14059,9 +14068,19 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                     }
                 }
 
-                for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
-                    for (int j = 0; j < n_kv; ++j) {
-                        data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                if (data) {
+                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+                        for (int j = 0; j < n_kv; ++j) {
+                            data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                        }
+                    }
+                }
+
+                if (data_swa) {
+                    for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+                        for (int j = 0; j < n_kv; ++j) {
+                            data_swa[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+                        }
                     }
                 }
             }
@@ -16201,6 +16220,10 @@ int32_t llama_lora_adapter_remove(
     return -1;
 }
 
+void llama_lora_adapter_clear(struct llama_context * ctx) {
+    ctx->lora_adapters.clear();
+}
+
 void llama_lora_adapter_free(struct llama_lora_adapter * adapter) {
     delete adapter;
 }
@@ -16620,9 +16643,7 @@ struct llama_context * llama_new_context_with_model(
         for (int i = 0; i < ggml_backend_sycl_get_device_count(); ++i) {
             ggml_backend_t backend = ggml_backend_sycl_init(i);
             if (backend == nullptr) {
-                int id_list[GGML_SYCL_MAX_DEVICES];
-                ggml_sycl_get_gpu_list(id_list, GGML_SYCL_MAX_DEVICES);
-                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d (index %d) backend\n", __func__, id_list[i], i);
+                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d for No.%d backend\n", __func__, i, i);
                 llama_free(ctx);
                 return nullptr;
             }
@@ -19123,11 +19144,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
     s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
-#ifdef GGML_USE_LLAMAFILE
-    s += "LLAMAFILE = 1 | ";
-#else
-    s += "LLAMAFILE = 0 | ";
-#endif
+    s += "LLAMAFILE = " + std::to_string(ggml_cpu_has_llamafile()) + " | ";
 
     return s.c_str();
 }
@@ -1,4 +1,3 @@
-#include <iostream>
 #include <string>
 #include <vector>
 #include <sstream>
@@ -133,13 +132,31 @@ int main(void) {
         );
         formatted_chat.resize(res);
         std::string output(formatted_chat.data(), formatted_chat.size());
-        std::cout << output << "\n-------------------------\n";
+        printf("%s\n", output.c_str());
+        printf("-------------------------\n");
        assert(output == expected);
    }

-    // test llama_chat_format_single
-    std::cout << "\n\n=== llama_chat_format_single ===\n\n";
+    // test llama_chat_format_single for system message
+    printf("\n\n=== llama_chat_format_single (system message) ===\n\n");
    std::vector<llama_chat_msg> chat2;
+    llama_chat_msg sys_msg{"system", "You are a helpful assistant"};
+
+    auto fmt_sys = [&](std::string tmpl) {
+        auto output = llama_chat_format_single(nullptr, tmpl, chat2, sys_msg, false);
+        printf("fmt_sys(%s) : %s\n", tmpl.c_str(), output.c_str());
+        printf("-------------------------\n", output.c_str());
+        return output;
+    };
+    assert(fmt_sys("chatml") == "<|im_start|>system\nYou are a helpful assistant<|im_end|>\n");
+    assert(fmt_sys("llama2") == "[INST] You are a helpful assistant\n");
+    assert(fmt_sys("gemma") == ""); // for gemma, system message is merged with user message
+    assert(fmt_sys("llama3") == "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|>");
+
+
+    // test llama_chat_format_single for user message
+    printf("\n\n=== llama_chat_format_single (user message) ===\n\n");
    chat2.push_back({"system", "You are a helpful assistant"});
    chat2.push_back({"user", "Hello"});
    chat2.push_back({"assistant", "I am assistant"});
@@ -147,12 +164,13 @@ int main(void) {
 
     auto fmt_single = [&](std::string tmpl) {
         auto output = llama_chat_format_single(nullptr, tmpl, chat2, new_msg, true);
-        std::cout << "fmt_single(" << tmpl << ")\n" << output << "\n-------------------------\n";
+        printf("fmt_single(%s) : %s\n", tmpl.c_str(), output.c_str());
+        printf("-------------------------\n", output.c_str());
         return output;
     };
     assert(fmt_single("chatml") == "\n<|im_start|>user\nHow are you<|im_end|>\n<|im_start|>assistant\n");
     assert(fmt_single("llama2") == "[INST] How are you [/INST]");
     assert(fmt_single("gemma") == "\n<start_of_turn>user\nHow are you<end_of_turn>\n<start_of_turn>model\n");
     assert(fmt_single("llama3") == "<|start_header_id|>user<|end_header_id|>\n\nHow are you<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n");
 
     return 0;