Merge branch 'gguf' into gguf-write-single-pass

commit 307e09cd85
.gitignore (vendored): 1 change

@@ -50,7 +50,6 @@ models-mnt
 /embd-input-test
 /gguf
 /gguf-llama-simple
-/gptneox-main
 /libllama.so
 build-info.h
 arm_neon.h
CMakeLists.txt

@@ -296,7 +296,6 @@ if (LLAMA_METAL)
     find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
     find_library(METAL_FRAMEWORK Metal REQUIRED)
     find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
-    find_library(METALPERFORMANCE_FRAMEWORK MetalPerformanceShaders REQUIRED)

     set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)

@@ -313,7 +312,6 @@ if (LLAMA_METAL)
         ${FOUNDATION_LIBRARY}
         ${METAL_FRAMEWORK}
         ${METALKIT_FRAMEWORK}
-        ${METALPERFORMANCE_FRAMEWORK}
         )
 endif()

@@ -570,6 +568,16 @@ install(
     WORLD_READ
     WORLD_EXECUTE
     DESTINATION ${CMAKE_INSTALL_BINDIR})
+if (LLAMA_METAL)
+    install(
+        FILES ggml-metal.metal
+        PERMISSIONS
+            OWNER_READ
+            OWNER_WRITE
+            GROUP_READ
+            WORLD_READ
+        DESTINATION ${CMAKE_INSTALL_BINDIR})
+endif()

 #
 # programs, examples and tests
Makefile: 12 changes

@@ -1,8 +1,8 @@
 # Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf gptneox-main
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple server embd-input-test gguf

 # Binaries only useful for tests
-TEST_TARGETS = tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0
+TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0

 default: $(BUILD_TARGETS)

@@ -283,7 +283,7 @@ endif # LLAMA_CLBLAST
 ifdef LLAMA_METAL
 	CFLAGS   += -DGGML_USE_METAL -DGGML_METAL_NDEBUG
 	CXXFLAGS += -DGGML_USE_METAL
-	LDFLAGS  += -framework Foundation -framework Metal -framework MetalKit -framework MetalPerformanceShaders
+	LDFLAGS  += -framework Foundation -framework Metal -framework MetalKit
 	OBJS     += ggml-metal.o
 endif # LLAMA_METAL

@@ -388,9 +388,6 @@ embd-input-test: $(LIB_PRE)embdinput$(DSO_EXT) examples/embd-input/embd-input-te
 gguf: examples/gguf/gguf.cpp build-info.h ggml.o llama.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

-gptneox-main: gptneox-main.cpp ggml.o $(OBJS)
-	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
-
 train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp build-info.h ggml.o llama.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

@@ -418,6 +415,9 @@ benchmark-matmult: examples/benchmark/benchmark-matmult.cpp build-info.h ggml.o
 vdot: pocs/vdot/vdot.cpp ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

+tests/test-llama-grammar: tests/test-llama-grammar.cpp build-info.h ggml.o llama.o common.o $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)
+
 tests/test-grammar-parser: tests/test-grammar-parser.cpp examples/grammar-parser.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.txt,$^) -o $@ $(LDFLAGS)

README.md: 22 changes

@@ -284,7 +284,7 @@ When built with Metal support, you can enable GPU inference with the `--gpu-laye
 Any value larger than 0 will offload the computation to the GPU. For example:

 ```bash
-./main -m ./models/7B/ggml-model-q4_0.bin -n 128 -ngl 1
+./main -m ./models/7B/ggml-model-q4_0.gguf -n 128 -ngl 1
 ```

 ### MPI Build
@@ -323,7 +323,7 @@ The above will distribute the computation across 2 processes on the first host a
 Finally, you're ready to run a computation using `mpirun`:

 ```bash
-mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.bin -n 128
+mpirun -hostfile hostfile -n 3 ./main -m ./models/7B/ggml-model-q4_0.gguf -n 128
 ```

 ### BLAS Build
@@ -506,10 +506,10 @@ python3 convert.py models/7B/
 python convert.py models/7B/ --vocabtype bpe

 # quantize the model to 4-bits (using q4_0 method)
-./quantize ./models/7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin q4_0
+./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0

 # run the inference
-./main -m ./models/7B/ggml-model-q4_0.bin -n 128
+./main -m ./models/7B/ggml-model-q4_0.gguf -n 128
 ```

 When running the larger models, make sure you have enough disk space to store all the intermediate files.
@@ -565,7 +565,7 @@ Here is an example of a few-shot interaction, invoked with the command
 ./examples/chat-13B.sh

 # custom arguments using a 13B model
-./main -m ./models/13B/ggml-model-q4_0.bin -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
+./main -m ./models/13B/ggml-model-q4_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
 ```

 Note the use of `--color` to distinguish between user input and generated text. Other parameters are explained in more detail in the [README](examples/main/README.md) for the `main` example program.
@@ -628,6 +628,8 @@ OpenLLaMA is an openly licensed reproduction of Meta's original LLaMA model. It

 ### Using [GPT4All](https://github.com/nomic-ai/gpt4all)

+*Note: these instructions are likely obsoleted by the GGUF update*
+
 - Obtain the `tokenizer.model` file from LLaMA model and put it to `models`
 - Obtain the `added_tokens.json` file from Alpaca model and put it to `models`
 - Obtain the `gpt4all-lora-quantized.bin` file from GPT4All model and put it to `models/gpt4all-7B`
@@ -703,7 +705,7 @@ If your issue is with model generation quality, then please at least scan the fo
 #### How to run

 1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
-2. Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
+2. Run `./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw`
 3. Output:
 ```
 perplexity : calculating perplexity over 655 chunks
@@ -802,13 +804,13 @@ docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-
 On completion, you are ready to play!

 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```

 or with a light image:

 ```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
+docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
 ```

 ### Docker With CUDA
@@ -839,8 +841,8 @@ The resulting images, are essentially the same as the non-CUDA images:
 After building locally, Usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.

 ```bash
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
+docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
+docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
 ```

 ### Contributing
ci/run.sh: 44 changes

@@ -159,17 +159,17 @@ function gg_run_open_llama_3b_v2 {

     python3 ../convert.py ${path_models}

-    model_f16="${path_models}/ggml-model-f16.bin"
-    model_q8_0="${path_models}/ggml-model-q8_0.bin"
-    model_q4_0="${path_models}/ggml-model-q4_0.bin"
-    model_q4_1="${path_models}/ggml-model-q4_1.bin"
-    model_q5_0="${path_models}/ggml-model-q5_0.bin"
-    model_q5_1="${path_models}/ggml-model-q5_1.bin"
-    model_q2_k="${path_models}/ggml-model-q2_k.bin"
-    model_q3_k="${path_models}/ggml-model-q3_k.bin"
-    model_q4_k="${path_models}/ggml-model-q4_k.bin"
-    model_q5_k="${path_models}/ggml-model-q5_k.bin"
-    model_q6_k="${path_models}/ggml-model-q6_k.bin"
+    model_f16="${path_models}/ggml-model-f16.gguf"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"

     wiki_test_60="${path_wiki}/wiki.test-60.raw"

@@ -285,17 +285,17 @@ function gg_run_open_llama_7b_v2 {

     python3 ../convert.py ${path_models}

-    model_f16="${path_models}/ggml-model-f16.bin"
-    model_q8_0="${path_models}/ggml-model-q8_0.bin"
-    model_q4_0="${path_models}/ggml-model-q4_0.bin"
-    model_q4_1="${path_models}/ggml-model-q4_1.bin"
-    model_q5_0="${path_models}/ggml-model-q5_0.bin"
-    model_q5_1="${path_models}/ggml-model-q5_1.bin"
-    model_q2_k="${path_models}/ggml-model-q2_k.bin"
-    model_q3_k="${path_models}/ggml-model-q3_k.bin"
-    model_q4_k="${path_models}/ggml-model-q4_k.bin"
-    model_q5_k="${path_models}/ggml-model-q5_k.bin"
-    model_q6_k="${path_models}/ggml-model-q6_k.bin"
+    model_f16="${path_models}/ggml-model-f16.gguf"
+    model_q8_0="${path_models}/ggml-model-q8_0.gguf"
+    model_q4_0="${path_models}/ggml-model-q4_0.gguf"
+    model_q4_1="${path_models}/ggml-model-q4_1.gguf"
+    model_q5_0="${path_models}/ggml-model-q5_0.gguf"
+    model_q5_1="${path_models}/ggml-model-q5_1.gguf"
+    model_q2_k="${path_models}/ggml-model-q2_k.gguf"
+    model_q3_k="${path_models}/ggml-model-q3_k.gguf"
+    model_q4_k="${path_models}/ggml-model-q4_k.gguf"
+    model_q5_k="${path_models}/ggml-model-q5_k.gguf"
+    model_q6_k="${path_models}/ggml-model-q6_k.gguf"

     wiki_test="${path_wiki}/wiki.test.raw"

convert.py: 29 changes

@@ -155,12 +155,7 @@ class Params:
         n_layer = config["num_hidden_layers"]
         n_ff    = config["intermediate_size"]
         n_head  = config["num_attention_heads"]
-        if "num_key_value_heads" in config:
-            n_head_kv = config["num_key_value_heads"]
-        else:
-            n_head_kv = None
+        n_head_kv = config["num_key_value_heads"] if "num_key_value_heads" in config else n_head

         f_norm_eps = config["rms_norm_eps"]

         n_mult = Params.find_n_mult(n_ff, n_embd)
@@ -719,7 +714,7 @@ class OutputFile:
         self.gguf.add_feed_forward_length (params.n_ff)
         self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
         self.gguf.add_head_count          (params.n_head)
-        if params.n_head_kv is not None: self.gguf.add_head_count_kv(params.n_head_kv)
+        self.gguf.add_head_count_kv       (params.n_head_kv)
         self.gguf.add_layer_norm_rms_eps  (params.f_norm_eps)

     def add_meta_vocab(self, vocab: Vocab) -> None:
@@ -817,6 +812,23 @@ def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyM
 def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
     tmap = gguf.get_tensor_name_map(ARCH, params.n_layer)

+    tmp = model
+
+    # HF models permut or pack some of the tensors, so we need to undo that
+    for i in itertools.count():
+        if f"model.layers.{i}.self_attn.q_proj.weight" in model:
+            print(f"Permuting layer {i}")
+            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head_kv)
+            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
+            #tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
+        elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
+            print(f"Unpacking and permuting layer {i}")
+            tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head_kv)
+            tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
+            tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
+        else:
+            break
+
 out: LazyModel = {}
     for name, lazy_tensor in model.items():
         name_new = name
@@ -830,8 +842,9 @@ def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
         else:
             raise Exception(f"Unexpected tensor name: {name}")

-        if gguf.should_skip_tensor(ARCH, params.n_layer, name_new):
+        if gguf.should_skip_tensor_TMP(ARCH, params.n_layer, name_new):
             print(f"skipping tensor {name_new}")
+            continue
         else:
             print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type} | {lazy_tensor.shape}")
             out[name_new] = lazy_tensor
@@ -3,7 +3,7 @@
 ## Verifying that the model is running on the GPU with cuBLAS
 Make sure you compiled llama with the correct env variables according to [this guide](../README.md#cublas), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
 ```shell
-./main -m "path/to/model.bin" -ngl 200000 -p "Please sir, may I have some "
+./main -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some "
 ```

 When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines:
@@ -25,9 +25,9 @@ GPU: A6000 (48GB VRAM)
 CPU: 7 physical cores
 RAM: 32GB

-Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.ggmlv3.q4_0.bin` (30B parameters, 4bit quantization, GGML)
+Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4bit quantization, GGML)

-Run command: `./main -m "path/to/model.bin" -p "-p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`
+Run command: `./main -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`

 Result:
@@ -262,6 +262,21 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.cfg_negative_prompt = argv[i];
+        } else if (arg == "--cfg-negative-prompt-file") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::ifstream file(argv[i]);
+            if (!file) {
+                fprintf(stderr, "error: failed to open file '%s'\n", argv[i]);
+                invalid_param = true;
+                break;
+            }
+            std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.cfg_negative_prompt));
+            if (params.cfg_negative_prompt.back() == '\n') {
+                params.cfg_negative_prompt.pop_back();
+            }
         } else if (arg == "--cfg-scale") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -555,6 +570,8 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "  --grammar-file FNAME  file to read grammar from\n");
     fprintf(stdout, "  --cfg-negative-prompt PROMPT\n");
     fprintf(stdout, "                        negative prompt to use for guidance. (default: empty)\n");
+    fprintf(stdout, "  --cfg-negative-prompt-file FNAME\n");
+    fprintf(stdout, "                        negative prompt file to use for guidance. (default: empty)\n");
     fprintf(stdout, "  --cfg-scale N         strength of guidance (default: %f, 1.0 = disable)\n", params.cfg_scale);
     fprintf(stdout, "  --rope-scale N        RoPE context linear scaling factor, inverse of --rope-freq-scale (default: %g)\n", 1.0f/params.rope_freq_scale);
     fprintf(stdout, "  --rope-freq-base N    RoPE base frequency, used by NTK-aware scaling (default: %.1f)\n", params.rope_freq_base);
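The `--cfg-negative-prompt-file` branch added above slurps the whole file into `params.cfg_negative_prompt` and strips a single trailing newline. A minimal standalone sketch of the same idiom (the file name is a placeholder, not from the commit):

```cpp
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <string>

int main() {
    std::ifstream file("negative-prompt.txt"); // hypothetical input file
    if (!file) {
        fprintf(stderr, "error: failed to open file\n");
        return 1;
    }
    std::string prompt;
    // copy the entire stream into the string, as the argument parser does
    std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(),
              std::back_inserter(prompt));
    // drop one trailing newline so it does not leak into the guidance text
    // (this sketch adds an empty() guard that the parser above omits)
    if (!prompt.empty() && prompt.back() == '\n') {
        prompt.pop_back();
    }
    printf("negative prompt: '%s'\n", prompt.c_str());
    return 0;
}
```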
@@ -52,7 +52,7 @@ struct gpt_params {
     std::string cfg_negative_prompt;       // string to help guidance
     float       cfg_scale         = 1.f;   // How strong is guidance

-    std::string model             = "models/7B/ggml-model.bin"; // model path
+    std::string model             = "models/7B/ggml-model-f16.gguf"; // model path
     std::string model_alias       = "unknown"; // model alias
     std::string prompt            = "";
     std::string path_prompt_cache = "";  // path to file for saving/loading prompt eval state
@@ -2,7 +2,7 @@
 //
 // - First, export a LLaMA graph:
 //
-//     $ ./bin/main -m ../models/7B/ggml-model-q4_0.bin --export
+//     $ ./bin/main -m ../models/7B/ggml-model-q4_0.gguf --export
 //
 // - Run this tool to evaluate the exported graph:
 //
@@ -25,7 +25,7 @@
 #endif

 struct quantize_stats_params {
-    std::string model = "models/7B/ggml-model-f16.bin";
+    std::string model = "models/7B/ggml-model-f16.gguf";
     bool verbose = false;
     bool per_layer_stats = false;
     bool print_histogram = false;
@@ -5,7 +5,7 @@ This example demonstrates a simple HTTP API server and a simple web front end to
 Command line options:

 - `--threads N`, `-t N`: Set the number of threads to use during computation.
-- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
+- `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`).
 - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
 - `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models, for example, baichuan models were build with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
@@ -48,14 +48,12 @@ To get started right away, run the following command, making sure to use the cor
 ### Unix-based systems (Linux, macOS, etc.):

 ```bash
-./server -m models/7B/ggml-model.bin -c 2048
+./server -m models/7B/ggml-model.gguf -c 2048
 ```

 ### Windows:

 ```powershell
-server.exe -m models\7B\ggml-model.bin -c 2048
-```

 The above command will start a server that by default listens on `127.0.0.1:8080`.
 You can consume the endpoints with Postman or NodeJS with axios library. You can visit the web front end at the same url.
@@ -15,6 +15,7 @@
 #include "index.html.hpp"
 #include "index.js.hpp"
 #include "completion.js.hpp"
+#include "json-schema-to-grammar.mjs.hpp"

 #ifndef SERVER_VERBOSE
 #define SERVER_VERBOSE 1
@@ -1199,6 +1200,12 @@ int main(int argc, char **argv)
             res.set_content(reinterpret_cast<const char*>(&completion_js), completion_js_len, "application/javascript");
             return false; });

+    // this is only called if no index.html is found in the public --path
+    svr.Get("/json-schema-to-grammar.mjs", [](const Request &, Response &res)
+            {
+                res.set_content(reinterpret_cast<const char*>(&json_schema_to_grammar_mjs), json_schema_to_grammar_mjs_len, "application/javascript");
+                return false; });
+
     svr.Post("/completion", [&llama](const Request &req, Response &res)
              {
                  auto lock = llama.lock();
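The new route serves the embedded `json-schema-to-grammar.mjs` the same way the other baked-in assets are served. A minimal standalone sketch of that pattern with cpp-httplib, the HTTP library this server example is built on (path, body, and port are placeholders):

```cpp
#include "httplib.h" // cpp-httplib, bundled with the server example

int main() {
    httplib::Server svr;

    // a fixed script body standing in for the embedded asset
    static const char js[] = "export function hello() { return 'hi'; }\n";

    svr.Get("/hello.mjs", [](const httplib::Request &, httplib::Response & res) {
        res.set_content(js, sizeof(js) - 1, "application/javascript");
    });

    svr.listen("127.0.0.1", 8080);
    return 0;
}
```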
@@ -14,8 +14,6 @@
           with pkgs.darwin.apple_sdk_11_0.frameworks; [
             Accelerate
             MetalKit
-            MetalPerformanceShaders
-            MetalPerformanceShadersGraph
           ]
         else if isAarch32 && isDarwin then
           with pkgs.darwin.apple_sdk.frameworks; [
ggml-alloc.c: 36 changes

@@ -67,6 +67,8 @@ struct ggml_allocr {
     struct hash_node hash_table[GGML_GRAPH_HASHTABLE_SIZE];
     size_t max_size;
     bool measure;
+    int parse_seq[GGML_MAX_NODES];
+    bool has_parse_seq;

 #ifdef GGML_ALLOCATOR_DEBUG
     struct ggml_tensor * allocated_tensors[1024];
@@ -111,10 +113,10 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor)

     size_t max_avail = 0;

-    // find the best fitting free block
+    // find the best fitting free block besides the last block
     int best_fit_block = -1;
     size_t best_fit_size = SIZE_MAX;
-    for (int i = 0; i < alloc->n_free_blocks; i++) {
+    for (int i = 0; i < alloc->n_free_blocks - 1; i++) {
         struct free_block * block = &alloc->free_blocks[i];
         max_avail = MAX(max_avail, block->size);
         if (block->size >= size && block->size <= best_fit_size) {
@@ -126,11 +128,18 @@ void ggml_allocr_alloc(struct ggml_allocr * alloc, struct ggml_tensor * tensor)
     AT_PRINTF("block %d\n", best_fit_block);

     if (best_fit_block == -1) {
+        // the last block is our last resort
+        struct free_block * block = &alloc->free_blocks[alloc->n_free_blocks - 1];
+        if (block->size >= size) {
+            best_fit_block = alloc->n_free_blocks - 1;
+            max_avail = MAX(max_avail, block->size);
+        } else {
         fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
                 __func__, size, max_avail);
         GGML_ASSERT(!"not enough space in the buffer");
         return;
     }
+    }
     struct free_block * block = &alloc->free_blocks[best_fit_block];
     void * addr = block->addr;
     block->addr = (char*)block->addr + size;
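The reworked search above best-fits among every block except the last one and uses the last block only as a fallback, presumably so the large unallocated tail is not carved up while smaller recycled blocks can still satisfy the request. A simplified standalone sketch of the policy (a plain size array instead of the ggml structs):

```cpp
#include <cstdint>
#include <cstdio>

// best-fit over all blocks except the last; the last block is a last resort
int pick_block(const size_t * sizes, int n, size_t need) {
    int    best      = -1;
    size_t best_size = SIZE_MAX;
    for (int i = 0; i < n - 1; i++) { // note: skips the final block
        if (sizes[i] >= need && sizes[i] <= best_size) {
            best      = i;
            best_size = sizes[i];
        }
    }
    if (best == -1 && n > 0 && sizes[n - 1] >= need) {
        best = n - 1; // fall back to the tail block
    }
    return best;
}

int main() {
    size_t blocks[] = { 64, 256, 128, 4096 };    // 4096 models the tail block
    printf("%d\n", pick_block(blocks, 4, 100));  // prints 2: the 128 block wins
    return 0;
}
```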
@@ -229,6 +238,17 @@ static void ggml_allocator_free_tensor(struct ggml_allocr * alloc, struct ggml_t
     alloc->n_free_blocks++;
 }

+void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n) {
+    int pos = 0;
+    for (int i = 0; i < n; i++) {
+        if (list[i] != -1) {
+            alloc->parse_seq[pos] = list[i];
+            pos++;
+        }
+    }
+    alloc->has_parse_seq = true;
+}
+
 void ggml_allocr_reset(struct ggml_allocr * alloc) {
     alloc->n_free_blocks = 1;
     size_t align_offset = aligned_offset(alloc->data, 0, alloc->alignment);
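`ggml_allocr_set_parse_seq` copies the caller's node order while compacting away `-1` entries; in the Metal concurrency list a `-1` marks a memory barrier between batches of independent ops (visible in the `ggml_metal_graph_compute` hunk further down), which matters for dispatch but not for allocation order. A small sketch of that compaction:

```cpp
#include <cstdio>
#include <vector>

int main() {
    // a concurrency order with -1 barriers between independent batches
    const int list[] = { 0, 2, 1, -1, 4, 3, -1, 5 };

    std::vector<int> parse_seq;
    for (int v : list) {
        if (v != -1) {
            parse_seq.push_back(v); // same filtering as ggml_allocr_set_parse_seq
        }
    }

    for (int v : parse_seq) {
        printf("%d ", v); // prints: 0 2 1 4 3 5
    }
    printf("\n");
    return 0;
}
```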
@@ -248,6 +268,8 @@ struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment)
         /*.hash_table     = */ {{0}},
         /*.max_size       = */ 0,
         /*.measure        = */ false,
+        /*.parse_seq      = */ {0},
+        /*.has_parse_seq  = */ false,
 #ifdef GGML_ALLOCATOR_DEBUG
         /*.allocated_tensors = */ = {0},
 #endif
@@ -275,6 +297,8 @@ struct ggml_allocr * ggml_allocr_new_measure(size_t alignment) {
         /*.hash_table     = */ {{0}},
         /*.max_size       = */ 0,
         /*.measure        = */ true,
+        /*.parse_seq      = */ {0},
+        /*.has_parse_seq  = */ false,
 #ifdef GGML_ALLOCATOR_DEBUG
         /*.allocated_tensors = */ = {0},
 #endif
@@ -473,7 +497,13 @@ static size_t ggml_allocator_alloc_graph_tensors_n(
             allocate_node(alloc, input);
         }
     }
-    for (int i = 0; i < gf->n_nodes; i++) {
+    for (int ind = 0; ind < gf->n_nodes; ind++) {
+        int i;
+        if (alloc->has_parse_seq) {
+            i = alloc->parse_seq[ind];
+        } else {
+            i = ind;
+        }
         struct ggml_tensor * node = gf->nodes[i];

         // allocate parents (leafs)
@@ -10,6 +10,10 @@ extern "C" {
 GGML_API struct ggml_allocr * ggml_allocr_new(void * data, size_t size, size_t alignment);
 GGML_API struct ggml_allocr * ggml_allocr_new_measure(size_t alignment);

+// tell the allocator to parse nodes following the order described in the list
+// you should call this if your graph are optimized to execute out-of-order
+GGML_API void ggml_allocr_set_parse_seq(struct ggml_allocr * alloc, int * list, int n);
+
 GGML_API void ggml_allocr_free(struct ggml_allocr * alloc);
 GGML_API bool ggml_allocr_is_measure(struct ggml_allocr * alloc);
 GGML_API void ggml_allocr_reset(struct ggml_allocr * alloc);
@@ -66,10 +66,13 @@ void ggml_metal_get_tensor(struct ggml_metal_context * ctx, struct ggml_tensor *

 // try to find operations that can be run concurrently in the graph
 // you should run it again if the topology of your graph changes
-void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf);
+void ggml_metal_graph_find_concurrency(struct ggml_metal_context * ctx, struct ggml_cgraph * gf, bool check_mem);

-// if the graph has been optimized for concurrently dispatch
-bool ggml_metal_if_optimized(struct ggml_metal_context * ctx);
+// if the graph has been optimized for concurrently dispatch, return length of the concur_list if optimized
+int ggml_metal_if_optimized(struct ggml_metal_context * ctx);
+
+// output the concur_list for ggml_alloc
+int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx);

 // same as ggml_graph_compute but uses Metal
 // creates gf->n_threads command buffers in parallel
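Read together with the allocator changes, the two headers suggest the intended flow: find a concurrency order for the graph, read its length from the re-typed `ggml_metal_if_optimized`, and hand the list to the allocator so tensors are allocated in dispatch order. A hedged sketch of that wiring (the surrounding setup and the `check_mem` value are assumptions, not code from this commit):

```cpp
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-metal.h"

// hypothetical helper; assumes ctx, alloc and gf were created elsewhere
void plan_alloc_order(struct ggml_metal_context * ctx,
                      struct ggml_allocr        * alloc,
                      struct ggml_cgraph        * gf) {
    ggml_metal_graph_find_concurrency(ctx, gf, /*check_mem=*/false);

    const int n = ggml_metal_if_optimized(ctx); // now the concur_list length
    if (n > 0) {
        // allocate tensors in the same out-of-order sequence the GPU will run
        ggml_allocr_set_parse_seq(alloc, ggml_metal_get_concur_list(ctx), n);
    }
}
```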
ggml-metal.m: 191 changes

@@ -5,7 +5,6 @@
 #import <Foundation/Foundation.h>

 #import <Metal/Metal.h>
-#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

 #undef MIN
 #undef MAX
@@ -79,6 +78,14 @@ struct ggml_metal_context {
     GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32);
     GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32);
     GGML_METAL_DECL_KERNEL(rope);
     GGML_METAL_DECL_KERNEL(alibi_f32);
     GGML_METAL_DECL_KERNEL(cpy_f32_f16);
@@ -110,13 +117,6 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
     ctx->n_buffers = 0;
     ctx->concur_list_len = 0;

-    // determine if we can use MPS
-    if (MPSSupportsMTLDevice(ctx->device)) {
-        fprintf(stderr, "%s: using MPS\n", __func__);
-    } else {
-        fprintf(stderr, "%s: not using MPS\n", __func__);
-        GGML_ASSERT(false && "MPS not supported");
-    }

 #if 0
     // compile from source string and show compile log
@@ -163,10 +163,15 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {

     // load kernels
     {
+        NSError * error = nil;
 #define GGML_METAL_ADD_KERNEL(name) \
         ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
-        ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:nil]; \
-        fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name);
+        ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
+        fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name); \
+        if (error) { \
+            fprintf(stderr, "%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
+            return NULL; \
+        }

         GGML_METAL_ADD_KERNEL(add);
         GGML_METAL_ADD_KERNEL(add_row);
@@ -196,6 +201,14 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
         GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32);
         GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
         GGML_METAL_ADD_KERNEL(rope);
         GGML_METAL_ADD_KERNEL(alibi_f32);
         GGML_METAL_ADD_KERNEL(cpy_f32_f16);
@@ -243,11 +256,12 @@ void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
     ctx->n_cb = n_cb;
 }

-bool ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
-    if (ctx->concur_list_len) {
-        return true;
-    }
-    return false;
+int ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
+    return ctx->concur_list_len;
+}
+
+int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) {
+    return ctx->concur_list;
 }

 // finds the Metal buffer that contains the tensor data on the GPU device
@@ -390,7 +404,7 @@ void ggml_metal_get_tensor(

 void ggml_metal_graph_find_concurrency(
         struct ggml_metal_context * ctx,
-        struct ggml_cgraph * gf) {
+        struct ggml_cgraph * gf, bool check_mem) {
     int search_depth = gf->n_nodes; //we only find concurrency in this range to avoid wasting too much time
     int nodes_unused[GGML_MAX_CONCUR];

@@ -437,7 +451,7 @@ void ggml_metal_graph_find_concurrency(
                 }
             }
         }
-        if (exe_flag) {
+        if (exe_flag && check_mem) {
            // check if nodes[i]'s data will be overwritten by a node before nodes[i].
            // if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3]
            int64_t data_start = (int64_t) gf->nodes[i]->data;
@@ -521,7 +535,7 @@ void ggml_metal_graph_compute(

                 id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx];

-                id<MTLComputeCommandEncoder> encoder = nil;
+                id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];

                 const int node_start =                                  (cb_idx + 0) * n_nodes_per_cb;
                 const int node_end   = (cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb;
@@ -530,10 +544,6 @@ void ggml_metal_graph_compute(
                     const int i = has_concur ? ctx->concur_list[ind] : ind;

                     if (i == -1) {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                            continue;
-                        }
                         [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                         continue;
                     }
@@ -607,10 +617,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_ADD:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         if (ggml_nelements(src1) == ne10) {
                             // src1 is a row
                             [encoder setComputePipelineState:ctx->pipeline_add_row];
@@ -628,10 +634,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_MUL:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         if (ggml_nelements(src1) == ne10) {
                             // src1 is a row
                             [encoder setComputePipelineState:ctx->pipeline_mul_row];
@@ -649,10 +651,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_SCALE:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const float scale = *(const float *) src1->data;

                         [encoder setComputePipelineState:ctx->pipeline_scale];
@@ -668,10 +666,6 @@ void ggml_metal_graph_compute(
                     switch (ggml_get_unary_op(gf->nodes[i])) {
                         case GGML_UNARY_OP_SILU:
                             {
-                                if (encoder == nil) {
-                                    encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                                }
-
                                 [encoder setComputePipelineState:ctx->pipeline_silu];
                                 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                 [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
@@ -682,10 +676,6 @@ void ggml_metal_graph_compute(
                             } break;
                         case GGML_UNARY_OP_RELU:
                             {
-                                if (encoder == nil) {
-                                    encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                                }
-
                                 [encoder setComputePipelineState:ctx->pipeline_relu];
                                 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                 [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
@@ -696,10 +686,6 @@ void ggml_metal_graph_compute(
                             } break;
                         case GGML_UNARY_OP_GELU:
                             {
-                                if (encoder == nil) {
-                                    encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                                }
-
                                 [encoder setComputePipelineState:ctx->pipeline_gelu];
                                 [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                 [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
@@ -716,10 +702,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_SOFT_MAX:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const int nth = 32;

                         [encoder setComputePipelineState:ctx->pipeline_soft_max];
@@ -734,10 +716,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_DIAG_MASK_INF:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const int n_past = ((int32_t *)(dst->op_params))[0];

                         [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf];
@@ -755,53 +733,43 @@ void ggml_metal_graph_compute(

                         GGML_ASSERT(ne00 == ne10);
                         // GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere
+                        uint gqa = ne12/ne02;
                         GGML_ASSERT(ne03 == ne13);

+                        // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
+                        // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
                         if (ggml_is_contiguous(src0) &&
                             ggml_is_contiguous(src1) &&
-                            (src0t == GGML_TYPE_F32 || src0t == GGML_TYPE_F16) && ne11 > 1) {
-
-                            if (encoder != nil) {
-                                [encoder endEncoding];
-                                encoder = nil;
-                            }
-
-                            MPSDataType src0dt = src0t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;
-                            MPSDataType src1dt = src1t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;
-
-                            // for F32 x F32 we use MPS
-                            MPSMatrixDescriptor * desc0 = [MPSMatrixDescriptor
-                                matrixDescriptorWithRows:ne01 columns:ne00 rowBytes:src0->nb[1] dataType:src0dt];
-
-                            MPSMatrixDescriptor * desc1 = [MPSMatrixDescriptor
-                                matrixDescriptorWithRows:ne11 columns:ne10 rowBytes:src1->nb[1] dataType:src1dt];
-
-                            MPSMatrixDescriptor * desc = [MPSMatrixDescriptor
-                                matrixDescriptorWithRows:ne1 columns:ne0 rowBytes:dst->nb[1] dataType:MPSDataTypeFloat32];
-
-                            MPSMatrixMultiplication * mul = [[MPSMatrixMultiplication alloc]
-                                initWithDevice:ctx->device transposeLeft:false transposeRight:true
-                                    resultRows:ne11 resultColumns:ne01 interiorColumns:ne00 alpha:1.0 beta:0.0];
-
-                            // we need to do ne12 multiplications
-                            // TODO: is there a way to do this in parallel - currently very slow ..
-                            // TODO: might be possible to offload part of the computation to ANE using Accelerate's CBLAS
-                            for (int64_t i02 = 0; i02 < ne12; ++i02) {
-                                size_t offs_src0_cur = offs_src0 + i02/(ne12/ne02)*nb02; // gqa not used for now
-                                size_t offs_src1_cur = offs_src1 + i02*nb12;
-                                size_t offs_dst_cur  = offs_dst  + i02*nb2;
-
-                                MPSMatrix * mat_src0 = [[MPSMatrix alloc] initWithBuffer:id_src0 offset:offs_src0_cur descriptor:desc0];
-                                MPSMatrix * mat_src1 = [[MPSMatrix alloc] initWithBuffer:id_src1 offset:offs_src1_cur descriptor:desc1];
-                                MPSMatrix * mat_dst  = [[MPSMatrix alloc] initWithBuffer:id_dst  offset:offs_dst_cur  descriptor:desc ];
-
-                                [mul encodeToCommandBuffer:command_buffer leftMatrix:mat_src1 rightMatrix:mat_src0 resultMatrix:mat_dst];
-                            }
-                        } else {
-                            if (encoder == nil) {
-                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                            }
-
+                            src1t == GGML_TYPE_F32 &&
+                            [ctx->device supportsFamily:MTLGPUFamilyApple7] &&
+                            ne00%32 == 0 &&
+                            ne11 > 1) {
+                            switch (src0->type) {
+                                case GGML_TYPE_F16:  [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32];  break;
+                                case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break;
+                                case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break;
+                                case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break;
+                                case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break;
+                                case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break;
+                                case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break;
+                                case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break;
+                                default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
+                            }
+                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
+                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+                            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
+                            [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
+                            [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
+                            [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
+                            [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:8];
+                            [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:9];
+                            [encoder setBytes:&gqa  length:sizeof(gqa)  atIndex:10];
+                            [encoder setThreadgroupMemoryLength:8192 atIndex:0];
+                            [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63)/64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
+                        }
+                        else {
                             int nth0 = 32;
                             int nth1 = 1;

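The new dispatch tiles the output matrix: `(ne11+31)/32` threadgroups across the rows of src1, `(ne01+63)/64` across the rows of src0, `ne12` deep for the batch, with 128 threads per group, i.e. each threadgroup appears to produce one 32x64 output tile. A small sketch of the grid arithmetic (the sizes are made up):

```cpp
#include <cstdio>

int main() {
    const int ne01 = 4096; // rows of src0
    const int ne11 = 100;  // rows of src1
    const int ne12 = 1;    // batch size

    const int groups_x = (ne11 + 31) / 32; // ceil(ne11 / 32)
    const int groups_y = (ne01 + 63) / 64; // ceil(ne01 / 64)

    printf("threadgroups: %d x %d x %d = %d, 128 threads each\n",
           groups_x, groups_y, ne12, groups_x * groups_y * ne12);
    // prints: threadgroups: 4 x 64 x 1 = 256, 128 threads each
    return 0;
}
```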
@@ -900,23 +868,24 @@ void ggml_metal_graph_compute(
                         [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
                         [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
                         [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
+                        [encoder setBytes:&gqa length:sizeof(gqa) atIndex:17];

                         if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
                             src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_Q4_K) {
-                            [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                            [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                         }
                         else if (src0t == GGML_TYPE_Q3_K) {
 #ifdef GGML_QKK_64
-                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
 #else
-                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
 #endif
                         }
                         else if (src0t == GGML_TYPE_Q5_K) {
-                            [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                            [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                         }
                         else if (src0t == GGML_TYPE_Q6_K) {
-                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+                            [encoder dispatchThreadgroups:MTLSizeMake((ne01+1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                         } else {
                             [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
                             [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
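Note: the new gqa kernel argument carries the grouped-query-attention broadcast factor, and the third dispatch dimension goes from 1 to ne12 so the mat-vec kernels cover the whole batch in one dispatch. Judging from the removed MPS loop (i02/(ne12/ne02)), gqa is ne12/ne02; inside the kernel each src1 batch index maps back to the src0 matrix it shares with its group. An illustrative host-side sketch of that mapping (not the actual Metal shader):

    #include <cstddef>
    #include <cstdint>

    // byte offset of the src0 matrix used by src1 batch entry i12
    static size_t src0_batch_offset(int64_t i12, int64_t ne12, int64_t ne02, size_t nb02) {
        const int64_t gqa = ne12 / ne02; // query heads per shared KV head
        return (size_t)(i12 / gqa) * nb02;
    }

    int main() {
        return (int) src0_batch_offset(7, 32, 8, 1) == 1 ? 0 : 1; // 7/4 = 1
    }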
@@ -925,10 +894,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_GET_ROWS:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         switch (src0->type) {
                             case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_get_rows_f16]; break;
                             case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
@@ -954,10 +919,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_RMS_NORM:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         float eps;
                         memcpy(&eps, dst->op_params, sizeof(float));

@@ -977,10 +938,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_NORM:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const float eps = 1e-5f;

                         const int nth = 256;
@@ -999,10 +956,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_ALIBI:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         GGML_ASSERT((src0t == GGML_TYPE_F32));

                         const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past);
@@ -1042,10 +995,6 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_ROPE:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const int n_past = ((int32_t *) dst->op_params)[0];
                         const int n_dims = ((int32_t *) dst->op_params)[1];
                         const int mode = ((int32_t *) dst->op_params)[2];
@@ -1086,10 +1035,6 @@ void ggml_metal_graph_compute(
                 case GGML_OP_CPY:
                 case GGML_OP_CONT:
                     {
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
-                        }
-
                         const int nth = 32;

                         switch (src0t) {
969 ggml-metal.metal (file diff suppressed because it is too large)

2 ggml.c
@@ -19290,7 +19290,7 @@ void gguf_set_kv(struct gguf_context * ctx, struct gguf_context * src) {
                 }
                 gguf_set_arr_str(ctx, src->kv[i].key.data, data, src->kv[i].value.arr.n);
                 free(data);
-            } if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
+            } else if (src->kv[i].value.arr.type == GGUF_TYPE_ARRAY) {
                 GGML_ASSERT(false && "nested arrays not supported");
             } else {
                 gguf_set_arr_data(ctx, src->kv[i].key.data, src->kv[i].value.arr.type, src->kv[i].value.arr.data, src->kv[i].value.arr.n);
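Note: the hunk above fixes a fallthrough bug: without the else, a string-array kv pair handled by the first branch would also reach the trailing else and be copied a second time through gguf_set_arr_data. A toy model of the corrected control flow (types and prints are illustrative stand-ins):

    #include <cassert>
    #include <cstdio>

    enum toy_gguf_type { TOY_STRING, TOY_ARRAY, TOY_F32 };

    static void set_arr(toy_gguf_type t) {
        if (t == TOY_STRING) {
            std::printf("copy as string array\n");   // only branch taken for strings
        } else if (t == TOY_ARRAY) {                 // was "} if (...)" before the fix
            assert(false && "nested arrays not supported");
        } else {
            std::printf("copy raw array data\n");    // no longer also runs for strings
        }
    }

    int main() { set_arr(TOY_STRING); }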
6 gguf.py
@@ -147,7 +147,11 @@ MODEL_TENSOR_SKIP = {
 }


-def should_skip_tensor(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
+# TODO: the following helper functions should be removed
+# instead, get_tensor_name_map should return tuples of (name, MODEL_TENSOR)
+# however, my Python is very bad, and I couldn't figure out how to do this, hence these functions
+# REMOVE
+def should_skip_tensor_TMP(arch: MODEL_ARCH, n_blocks: int, name: str) -> bool:
     for skip in MODEL_TENSOR_SKIP.get(arch, []):
         for i in range(n_blocks):
             if name == MODEL_TENSOR_NAMES[arch][skip].format(bid=i):
88 llama.cpp
@@ -11,7 +11,7 @@

 #include "ggml.h"

-#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL)
+#if !defined(GGML_USE_CUBLAS)
 # include "ggml-alloc.h"
 # define LLAMA_USE_ALLOCATOR
 #else
@@ -1042,6 +1042,9 @@ struct llama_model_loader {
     };

         ctx_gguf = gguf_init_from_file(fname.c_str(), params);
+        if (!ctx_gguf) {
+            throw std::runtime_error(format("%s: failed to load model from %s\n", __func__, fname.c_str()));
+        }

         n_kv = gguf_get_n_kv(ctx_gguf);
         n_tensors = gguf_get_n_tensors(ctx_gguf);
@@ -1057,7 +1060,7 @@ struct llama_model_loader {
         // print meta data
         // TODO: make optional
         {
-            LLAMA_LOG_INFO("%s: loaded meta data with %d key-value paris and %d tensors from %s (version %s)\n",
+            LLAMA_LOG_INFO("%s: loaded meta data with %d key-value pairs and %d tensors from %s (version %s)\n",
                     __func__, n_kv, n_tensors, fname.c_str(), llama_file_version_name(file_version));

             for (int i = 0; i < n_tensors; i++) {
@@ -1083,6 +1086,15 @@ struct llama_model_loader {
         this->use_mmap = use_mmap;
     }

+    ~llama_model_loader() {
+        if (ctx_gguf) {
+            gguf_free(ctx_gguf);
+        }
+        if (ctx_meta) {
+            ggml_free(ctx_meta);
+        }
+    }
+
     const char * get_tensor_name(int i) const {
         return gguf_get_tensor_name(ctx_gguf, i);
     }
@@ -1895,11 +1907,11 @@ static struct ggml_cgraph * llama_build_graph(
         ggml_set_name(Q, "Q");

         struct ggml_tensor * K =
-            ggml_permute(ctx0,
-                    ggml_reshape_3d(ctx0,
-                        ggml_view_1d(ctx0, kv_self.k, (n_past + N)*n_embd_gqa, il*n_ctx*ggml_element_size(kv_self.k)*n_embd_gqa),
-                        n_embd_head, n_head_kv, n_past + N),
-                    0, 2, 1, 3);
+            ggml_view_3d(ctx0, kv_self.k,
+                    n_embd_head, n_past + N, n_head_kv,
+                    ggml_element_size(kv_self.k)*n_embd_gqa,
+                    ggml_element_size(kv_self.k)*n_embd_head,
+                    ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
         offload_func_kq(K);
         ggml_set_name(K, "K");

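Note: K is now built as a single strided 3-D view over the KV cache instead of view_1d + reshape_3d + permute, addressing the same elements without the intermediate tensors. With es = ggml_element_size(kv_self.k), the strides passed above keep the head dimension contiguous, step es*n_embd_gqa per token position, and es*n_embd_head per KV head. A sketch of the resulting element addressing (the helper is illustrative):

    #include <cstddef>
    #include <cstdint>

    // byte offset of element (i, pos, head) of layer il under the new K view
    static size_t k_offset(int64_t i, int64_t pos, int64_t head, int64_t il, size_t es,
                           int64_t n_embd_head, int64_t n_embd_gqa, int64_t n_ctx) {
        return (size_t)(es*n_embd_gqa*n_ctx*il     // view offset: layer slab
                      + es*n_embd_gqa*pos          // nb[1]: next token position
                      + es*n_embd_head*head        // nb[2]: next KV head
                      + es*i);                     // contiguous within one head
    }

    int main() { return k_offset(0, 1, 0, 0, 2, 128, 1024, 2048) == 2048 ? 0 : 1; }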
@@ -1928,9 +1940,9 @@ static struct ggml_cgraph * llama_build_graph(
         struct ggml_tensor * V =
             ggml_view_3d(ctx0, kv_self.v,
                     n_past + N, n_embd_head, n_head_kv,
-                    n_ctx*ggml_element_size(kv_self.v),
-                    n_ctx*ggml_element_size(kv_self.v)*n_embd_head,
-                    n_ctx*ggml_element_size(kv_self.v)*n_embd_gqa*il);
+                    ggml_element_size(kv_self.v)*n_ctx,
+                    ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+                    ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
         offload_func_v(V);
         ggml_set_name(V, "V");

@@ -2131,11 +2143,7 @@ static bool llama_eval_internal(
 #endif

 #ifdef GGML_USE_METAL
-    if (lctx.ctx_metal && N == 1) {
-        // TODO: disabled until #2413 is resolved
-        //if (!ggml_metal_if_optimized(lctx.ctx_metal)) {
-        //    ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf);
-        //}
+    if (lctx.ctx_metal) {
         ggml_metal_set_n_cb (lctx.ctx_metal, n_threads);
         ggml_metal_graph_compute(lctx.ctx_metal, gf);
         ggml_metal_get_tensor (lctx.ctx_metal, res);
@@ -2143,22 +2151,6 @@ static bool llama_eval_internal(
             ggml_metal_get_tensor(lctx.ctx_metal, embeddings);
         }
     } else {
-        // IMPORTANT:
-        // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla
-        // ggml_graph_compute(). It uses Apple's Accelerate CBLAS API which takes advantage of the ANE or the AMX
-        // coprocessor.
-        //
-        // When we implement Matrix x Matrix Metal multiplication, we can avoid this branch.
-        // But for now, we have focused only on Matrix x Vector Metal multiplication.
-        //
-        // TODO: avoid these syncs via shared memory (ref #1696)
-        //
-        if (lctx.ctx_metal) {
-            // We need to sync the GPU KV cache with the CPU KV cache
-            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.k);
-            ggml_metal_get_tensor(lctx.ctx_metal, kv_self.v);
-        }
-
         ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
     }
 #else
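Note: the two hunks above drop the N == 1 restriction and the CPU/Accelerate fallback: with mat x mat kernels now available (see the ggml-metal changes above), Metal runs the whole graph for both prompt processing and single-token eval, so the KV-cache sync back to the CPU is no longer needed. The dispatch rule reduces to a backend pick; a minimal sketch:

    #include <cstdio>

    enum class Backend { Metal, Cpu };

    // previously: Metal only when ctx_metal != NULL and N == 1 (one token);
    // now: Metal whenever the context exists, else the CPU path
    static Backend pick_backend(bool has_metal_ctx) {
        return has_metal_ctx ? Backend::Metal : Backend::Cpu;
    }

    int main() {
        std::printf("%s\n", pick_backend(true) == Backend::Metal ? "metal" : "cpu");
    }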
@@ -3440,6 +3432,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s

         const std::string name = ggml_get_name(meta);

+        // TODO: avoid hardcoded tensor names - use the TN_* constants
         if (name.find("attn_v.weight") != std::string::npos) {
             ++n_attention_wv;
         }
@@ -3518,6 +3511,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         } else {
             new_type = quantized_type;
 #ifdef GGML_USE_K_QUANTS
+            // TODO: avoid hardcoded tensor names - use the TN_* constants
             if (name == TN_OUTPUT) {
                 int nx = tensor->ne[0];
                 int ny = tensor->ne[1];
@@ -3532,7 +3526,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             else if (QK_K == 64 && (ftype == LLAMA_FTYPE_MOSTLY_Q4_K_S || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_S) &&
                     (i_attention_wv < n_attention_wv/8 || i_attention_wv >= 7*n_attention_wv/8)) new_type = GGML_TYPE_Q6_K;
                 ++i_attention_wv;
-            } else if (name.find("feed_forward.w2.weight") != std::string::npos) {
+            } else if (name.find("ffn_down.weight") != std::string::npos) {
                 if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q4_K;
                 else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_L) new_type = GGML_TYPE_Q5_K;
                 else if ((ftype == LLAMA_FTYPE_MOSTLY_Q4_K_M || ftype == LLAMA_FTYPE_MOSTLY_Q5_K_M) &&
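Note: "feed_forward.w2.weight" is the old internal tensor name; under the GGUF naming used by this branch the same tensor is "ffn_down.weight", so the quantization heuristics now match on that. A tiny illustrative mapping (only the pair visible in this hunk; other tensors are renamed elsewhere in the conversion code):

    #include <cstdio>
    #include <map>
    #include <string>

    static const std::map<std::string, std::string> k_renamed = {
        { "feed_forward.w2.weight", "ffn_down.weight" }, // from this hunk
    };

    int main() {
        std::printf("%s\n", k_renamed.at("feed_forward.w2.weight").c_str());
    }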
@@ -3587,7 +3581,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             new_data = work.data();
             std::vector<int64_t> hist_cur(1 << 4, 0);

-            const int chunk_size = 32 * 512;
+            static const int chunk_size = 32 * 512;
             const int nchunk = (nelements + chunk_size - 1)/chunk_size;
             const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1;
             if (nthread_use < 2) {
@@ -4141,7 +4135,18 @@ struct llama_context * llama_new_context_with_model(
             int n_past = hparams.n_ctx - n_tokens;
             llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
             ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
+#ifdef GGML_USE_METAL
+            if (params.n_gpu_layers > 0) {
+                ctx->ctx_metal = ggml_metal_init(1);
+                if (!ctx->ctx_metal) {
+                    LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
+                    llama_free(ctx);
+                    return NULL;
+                }
+                ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
+                ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
+            }
+#endif
             // measure memory requirements for the graph
             size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;

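Note: this moves Metal initialization ahead of the measurement pass: the concurrency order is computed on the measurement graph and handed to the allocator via ggml_allocr_set_parse_seq() before ggml_allocr_alloc_graph() measures buffer sizes, so the measured size reflects the order the Metal backend will actually execute in. The call sequence the hunk establishes, condensed (all names as in the diff):

    // ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past);
    // ctx->ctx_metal   = ggml_metal_init(1);
    // ggml_metal_graph_find_concurrency(ctx->ctx_metal, gf, false);
    // ggml_allocr_set_parse_seq(ctx->alloc,
    //         ggml_metal_get_concur_list(ctx->ctx_metal),
    //         ggml_metal_if_optimized(ctx->ctx_metal));
    // size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment;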
@@ -4159,6 +4164,11 @@ struct llama_context * llama_new_context_with_model(

             ctx->buf_alloc.resize(alloc_size);
             ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment);
+#ifdef GGML_USE_METAL
+            if (ctx->ctx_metal) {
+                ggml_allocr_set_parse_seq(ctx->alloc, ggml_metal_get_concur_list(ctx->ctx_metal), ggml_metal_if_optimized(ctx->ctx_metal));
+            }
+#endif
         }
 #else
         ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead());
@@ -4173,13 +4183,6 @@ struct llama_context * llama_new_context_with_model(
 #ifdef GGML_USE_METAL
     if (params.n_gpu_layers > 0) {
         // this allocates all Metal resources and memory buffers
-        ctx->ctx_metal = ggml_metal_init(1);
-
-        if (!ctx->ctx_metal) {
-            LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
-            llama_free(ctx);
-            return NULL;
-        }

         void * data_ptr = NULL;
         size_t data_size = 0;
@@ -4208,8 +4211,7 @@ struct llama_context * llama_new_context_with_model(
         LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0));
         LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0));

-        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].data, ctx->buf_scratch[0].size, 0));
-        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].data, ctx->buf_scratch[1].size, 0));
+        LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "alloc", ctx->buf_alloc.data, ctx->buf_alloc.size, 0));
 #undef LLAMA_METAL_CHECK_BUF
     }
 #endif
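Note: with the graph allocator in place, the two fixed scratch buffers ("scr0"/"scr1") collapse into the single allocator arena ("alloc") sized by the measurement pass above. ggml_metal_add_buffer() registers a named host range so any tensor living inside it can be resolved to a Metal buffer plus offset; a toy model of that lookup (illustrative only, the real one lives in ggml-metal.m):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct HostBuf { const char * name; const uint8_t * base; size_t size; };

    static bool resolve(const std::vector<HostBuf> & bufs, const void * p,
                        const char ** name, size_t * offs) {
        for (const auto & b : bufs) {
            const uint8_t * q = (const uint8_t *) p;
            if (q >= b.base && q < b.base + b.size) {
                *name = b.name;
                *offs = (size_t)(q - b.base);
                return true;
            }
        }
        return false; // pointer is not inside any registered buffer
    }

    int main() {
        static uint8_t kv[1024], arena[4096];
        const std::vector<HostBuf> bufs = {
            { "kv",    kv,    sizeof(kv)    },
            { "alloc", arena, sizeof(arena) }, // replaces "scr0"/"scr1"
        };
        const char * name; size_t offs;
        if (resolve(bufs, arena + 128, &name, &offs)) {
            std::printf("%s + %zu\n", name, offs);
        }
    }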
1 models/.editorconfig (new file)
@@ -0,0 +1 @@
+root = true
3 scripts/get-wikitext-2.sh (new file)
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip
tests/CMakeLists.txt
@@ -31,5 +31,6 @@ llama_build_executable(test-tokenizer-1.cpp)
 llama_test_executable(test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
 #llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
 llama_build_and_test_executable(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp)
+llama_build_and_test_executable(test-llama-grammar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/common.cpp)
 llama_build_and_test_executable(test-grad0.cpp) # SLOW
 # llama_build_and_test_executable(test-opt.cpp) # SLOW
403 tests/test-llama-grammar.cpp (new file)
@@ -0,0 +1,403 @@
+#ifdef NDEBUG
+#undef NDEBUG
+#endif
+
+#include "llama.cpp"
+#include "examples/common.cpp"
+#include "examples/grammar-parser.cpp"
+#include <cassert>
+
+int main()
+{
+    grammar_parser::parse_state parsed_grammar;
+
+    std::vector<std::pair<std::string, uint32_t>> expected = {
+        {"expr", 2},
+        {"expr_6", 6},
+        {"expr_7", 7},
+        {"ident", 8},
+        {"ident_10", 10},
+        {"num", 9},
+        {"num_11", 11},
+        {"root", 0},
+        {"root_1", 1},
+        {"root_5", 5},
+        {"term", 4},
+        {"ws", 3},
+        {"ws_12", 12},
+    };
+
+    std::vector<std::vector<llama_grammar_element>> expected_rules = {
+        {{LLAMA_GRETYPE_RULE_REF, 5}, {LLAMA_GRETYPE_END, 0}},
+        {
+            {LLAMA_GRETYPE_RULE_REF, 2},
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_RULE_REF, 4},
+            {LLAMA_GRETYPE_CHAR, 10},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {{LLAMA_GRETYPE_RULE_REF, 4}, {LLAMA_GRETYPE_RULE_REF, 7}, {LLAMA_GRETYPE_END, 0}},
+        {{LLAMA_GRETYPE_RULE_REF, 12}, {LLAMA_GRETYPE_END, 0}},
+        {
+            {LLAMA_GRETYPE_RULE_REF, 8},
+            {LLAMA_GRETYPE_ALT, 0},
+            {LLAMA_GRETYPE_RULE_REF, 9},
+            {LLAMA_GRETYPE_ALT, 0},
+            {LLAMA_GRETYPE_CHAR, 40},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_RULE_REF, 2},
+            {LLAMA_GRETYPE_CHAR, 41},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {{LLAMA_GRETYPE_RULE_REF, 1}, {LLAMA_GRETYPE_RULE_REF, 5}, {LLAMA_GRETYPE_ALT, 0}, {LLAMA_GRETYPE_RULE_REF, 1}, {LLAMA_GRETYPE_END, 0}},
+        {
+            {LLAMA_GRETYPE_CHAR, 45},
+            {LLAMA_GRETYPE_CHAR_ALT, 43},
+            {LLAMA_GRETYPE_CHAR_ALT, 42},
+            {LLAMA_GRETYPE_CHAR_ALT, 47},
+            {LLAMA_GRETYPE_RULE_REF, 4},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {{LLAMA_GRETYPE_RULE_REF, 6}, {LLAMA_GRETYPE_RULE_REF, 7}, {LLAMA_GRETYPE_ALT, 0}, {LLAMA_GRETYPE_END, 0}},
+        {
+            {LLAMA_GRETYPE_CHAR, 97},
+            {LLAMA_GRETYPE_CHAR_RNG_UPPER, 122},
+            {LLAMA_GRETYPE_RULE_REF, 10},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {{LLAMA_GRETYPE_RULE_REF, 11}, {LLAMA_GRETYPE_RULE_REF, 3}, {LLAMA_GRETYPE_END, 0}},
+        {
+            {LLAMA_GRETYPE_CHAR, 97},
+            {LLAMA_GRETYPE_CHAR_RNG_UPPER, 122},
+            {LLAMA_GRETYPE_CHAR_ALT, 48},
+            {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57},
+            {LLAMA_GRETYPE_CHAR_ALT, 95},
+            {LLAMA_GRETYPE_RULE_REF, 10},
+            {LLAMA_GRETYPE_ALT, 0},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 48},
+            {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57},
+            {LLAMA_GRETYPE_RULE_REF, 11},
+            {LLAMA_GRETYPE_ALT, 0},
+            {LLAMA_GRETYPE_CHAR, 48},
+            {LLAMA_GRETYPE_CHAR_RNG_UPPER, 57},
+            {LLAMA_GRETYPE_END, 0},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 32},
+            {LLAMA_GRETYPE_CHAR_ALT, 9},
+            {LLAMA_GRETYPE_CHAR_ALT, 10},
+            {LLAMA_GRETYPE_RULE_REF, 12},
+            {LLAMA_GRETYPE_ALT, 0},
+            {LLAMA_GRETYPE_END, 0},
+        },
+    };
+
+    for (auto pair : expected)
+    {
+        parsed_grammar.symbol_ids[pair.first] = pair.second;
+    }
+
+    for (auto rule : expected_rules)
+    {
+        parsed_grammar.rules.push_back({});
+        for (auto element : rule)
+        {
+            parsed_grammar.rules.back().push_back(element);
+        }
+    }
+
+    llama_grammar *grammar = NULL;
+    std::vector<const llama_grammar_element *> grammar_rules(parsed_grammar.c_rules());
+    grammar = llama_grammar_init(
+        grammar_rules.data(), grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
+
+    std::vector<std::vector<llama_grammar_element>> expected_stacks = {
+        {
+            {LLAMA_GRETYPE_RULE_REF, 5},
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_CHAR, 97},
+        },
+        {
+            {LLAMA_GRETYPE_RULE_REF, 5},
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_CHAR, 48},
+        },
+        {
+            {LLAMA_GRETYPE_RULE_REF, 5},
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_CHAR, 48},
+        },
+        {
+            {LLAMA_GRETYPE_RULE_REF, 5},
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_CHAR, 40},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_CHAR, 97},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_CHAR, 48},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_RULE_REF, 3},
+            {LLAMA_GRETYPE_CHAR, 48},
+        },
+        {
+            {LLAMA_GRETYPE_CHAR, 61},
+            {LLAMA_GRETYPE_RULE_REF, 7},
+            {LLAMA_GRETYPE_CHAR, 40},
+        }};
+
+    auto index = 0;
+    for (auto stack : grammar->stacks)
+    {
+        // compare stack to expected_stack
+        for (uint32_t i = 0; i < stack.size(); i++)
+        {
+            auto element = stack[i];
+            auto expected_element = expected_stacks[index][i];
+
+            // pretty print error message before asserting
+            if (expected_element.type != element->type || expected_element.value != element->value)
+            {
+                fprintf(stderr, "index: %d\n", index);
+                fprintf(stderr, "expected_element: %d, %d\n", expected_element.type, expected_element.value);
+                fprintf(stderr, "actual_element: %d, %d\n", element->type, element->value);
+                fprintf(stderr, "expected_element != actual_element\n");
+            }
+
+            assert(expected_element.type == element->type && expected_element.value == element->value);
+        }
+        index++;
+    }
+
+    std::vector<std::vector<const llama_grammar_element *>> next_stacks;
+    std::vector<llama_grammar_candidate> next_candidates;
+    next_candidates.resize(24);
+
+    for (size_t i = 0; i < 24; ++i)
+    {
+        uint32_t *cp = new uint32_t[2]; // dynamically allocate memory for code_point
+        cp[0] = 37 + i;
+        cp[1] = 0;
+        next_candidates[i] = {i, cp};
+    }
+
+    std::vector<std::vector<std::pair<uint32_t, uint16_t>>> expected_reject = {
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {11, 48},
+            {12, 49},
+            {13, 50},
+            {14, 51},
+            {15, 52},
+            {16, 53},
+            {17, 54},
+            {18, 55},
+            {19, 56},
+            {20, 57},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {11, 48},
+            {12, 49},
+            {13, 50},
+            {14, 51},
+            {15, 52},
+            {16, 53},
+            {17, 54},
+            {18, 55},
+            {19, 56},
+            {20, 57},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {11, 48},
+            {12, 49},
+            {13, 50},
+            {14, 51},
+            {15, 52},
+            {16, 53},
+            {17, 54},
+            {18, 55},
+            {19, 56},
+            {20, 57},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {3, 40},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+        {
+            {0, 37},
+            {1, 38},
+            {2, 39},
+            {4, 41},
+            {5, 42},
+            {6, 43},
+            {7, 44},
+            {8, 45},
+            {9, 46},
+            {10, 47},
+            {11, 48},
+            {12, 49},
+            {13, 50},
+            {14, 51},
+            {15, 52},
+            {16, 53},
+            {17, 54},
+            {18, 55},
+            {19, 56},
+            {20, 57},
+            {21, 58},
+            {22, 59},
+            {23, 60},
+        },
+    };
+
+    std::vector<llama_grammar_candidate> rejects = llama_grammar_reject_candidates_for_stack(grammar->rules, grammar->stacks[0], next_candidates);
+
+    std::vector<std::vector<llama_grammar_candidate>> all_rejects;
+
+    for (std::size_t count = 0; count < grammar->stacks.size(); ++count)
+    {
+        rejects = llama_grammar_reject_candidates_for_stack(grammar->rules, grammar->stacks[count], next_candidates);
+        all_rejects.push_back(rejects);
+    }
+
+    index = 0;
+    for (auto rej : all_rejects)
+    {
+        for (uint32_t i = 0; i < rej.size(); i++)
+        {
+            auto element = rej[i];
+            auto expected_element = expected_reject[index][i];
+            assert(element.index == expected_element.first && *element.code_points == expected_element.second);
+        }
+        index++;
+    }
+
+    for (auto &candidate : next_candidates)
+    {
+        delete[] candidate.code_points;
+        candidate.code_points = nullptr;
+    }
+    delete grammar;
+    return 0;
+}
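Note: the hand-built symbol table and rule vectors in this test appear to encode the arithmetic example grammar that ships with the repository (char 61 is '=', 10 is '\n', and 45/43/42/47 form the [-+*/] operator class). For reference, a parse_state equivalent to the hand-built one could also be produced with the parser the test links in; the grammar text below is the assumed source, not part of the test file:

    #include "examples/grammar-parser.h"

    static const char * k_grammar = R"""(
    root  ::= (expr "=" ws term "\n")+
    expr  ::= term ([-+*/] term)*
    term  ::= ident | num | "(" ws expr ")" ws
    ident ::= [a-z] [a-z0-9_]* ws
    num   ::= [0-9]+ ws
    ws    ::= [ \t\n]*
    )""";

    // grammar_parser::parse_state parsed = grammar_parser::parse(k_grammar);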