#!/bin/bash

if [ $# -lt 2 ]; then
    echo "usage: ./scripts/compare-commits.sh <commit1> <commit2> [additional llama-bench arguments]"
    exit 1
fi

set -e
set -x

# all remaining arguments are forwarded to llama-bench
bench_args="${@:3}"

# start from a clean results database
rm -f llama-bench.sqlite

# to test a backend, call the script with the corresponding environment variable (e.g. LLAMA_CUDA=1 ./scripts/compare-commits.sh ...)
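# A minimal sketch, not part of the upstream script: $make_opts is never set
# above, so backend selection relies on make reading the environment variable
# directly. An explicit mapping could look like this (LLAMA_CUDA=1 as a make
# option is an assumption based on the comment above):
if [ -n "${LLAMA_CUDA}" ]; then
    make_opts="LLAMA_CUDA=1"
fi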

# build and benchmark the first commit
git checkout "$1"
make clean && make -j32 $make_opts llama-bench
./llama-bench -o sql $bench_args | tee /dev/tty | sqlite3 llama-bench.sqlite

# build and benchmark the second commit
git checkout "$2"
make clean && make -j32 $make_opts llama-bench
./llama-bench -o sql $bench_args | tee /dev/tty | sqlite3 llama-bench.sqlite

# compare the results stored in llama-bench.sqlite
./scripts/compare-llama-bench.py -b "$1" -c "$2"
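
# Example invocation (the branch name, model path, and extra flags are illustrative only):
#   LLAMA_CUDA=1 ./scripts/compare-commits.sh master my-branch -m models/7B/ggml-model-q4_0.gguf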