mirror of https://github.com/ggerganov/llama.cpp.git
Commit 3420909dff
* ggml : automatic selection of best CPU backend
* amx : minor opt
* add GGML_AVX_VNNI to enable avx-vnni, fix checks
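The file is a short Bash helper that builds one CPU backend variant of ggml as a dynamically loadable shared library, named after the variant: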
#!/bin/bash

# Build one ggml CPU backend variant as a loadable shared library.
# $1 is the variant name; any remaining arguments are passed to cmake.
name="$1"
args=("${@:2}")

echo "Building $name with args: ${args[*]}"

# Fresh out-of-tree build: backends are compiled as dynamically loadable
# libraries (GGML_BACKEND_DL) and -march=native is disabled (GGML_NATIVE),
# so the target instruction set is controlled entirely by the extra flags.
rm -fr "build-cpu-$name"
cmake -S . -B "build-cpu-$name" -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF "${args[@]}"
cmake --build "build-cpu-$name" --config Release -t ggml-cpu -j "$(nproc)"

# Keep only the backend library, renamed after the variant, then clean up.
cp "build-cpu-$name/bin/libggml-cpu.so" "./libggml-cpu-$name.so"
rm -fr "build-cpu-$name"
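For context, a hypothetical usage sketch: assuming the script is saved as build-cpu.sh in the repository root (the actual filename is not shown in this view) and run once per target microarchitecture, it leaves behind one libggml-cpu-<name>.so per variant, from which a GGML_BACKEND_DL-enabled runtime can select the best backend the host CPU supports. The variant names and flag combinations below are illustrative, not taken from the repository; GGML_AVX_VNNI is the option added by this commit.

    # Illustrative invocations: one shared library per CPU feature level.
    ./build-cpu.sh sandybridge -DGGML_AVX=ON
    ./build-cpu.sh haswell     -DGGML_AVX2=ON -DGGML_FMA=ON
    ./build-cpu.sh alderlake   -DGGML_AVX2=ON -DGGML_AVX_VNNI=ON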