Mirror of https://github.com/ggerganov/llama.cpp.git
367946c668
The readme tells people to use the command-line option "-t 8", which starts 8 threads. On systems with fewer than 8 cores, this causes a significant slowdown. Remove the option from the example command lines and use /proc/cpuinfo on Linux to determine a sensible default.
47 lines · 1.8 KiB · Bash · Executable File
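
The commit referenced above removes the hard-coded "-t 8" from the README examples in favor of detecting a sensible default. As a minimal shell sketch of that idea (illustrative only, not the actual patch; the variable name n_threads and the fallback value 4 are assumptions):

# Sketch only: derive a sensible default thread count instead of a fixed -t 8.
# Counts logical CPUs via /proc/cpuinfo on Linux; falls back to a guess elsewhere.
if [ -r /proc/cpuinfo ]; then
    n_threads=$(grep -c '^processor' /proc/cpuinfo)   # one "processor" line per logical CPU
else
    n_threads=4   # assumed conservative fallback for systems without /proc/cpuinfo
fi
./main -m /models/7B/ggml-model-q4_0.bin -t "$n_threads" -p "Hello" -n 64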
#!/bin/bash

set -e

# Read the first argument into a variable
arg1="$1"

# Shift the arguments to remove the first one
shift

# Join the remaining arguments into a single string
arg2="$@"

if [[ $arg1 == '--convert' || $arg1 == '-c' ]]; then
    python3 ./convert-pth-to-ggml.py $arg2
elif [[ $arg1 == '--quantize' || $arg1 == '-q' ]]; then
    ./quantize $arg2
elif [[ $arg1 == '--run' || $arg1 == '-r' ]]; then
    ./main $arg2
elif [[ $arg1 == '--download' || $arg1 == '-d' ]]; then
    python3 ./download-pth.py $arg2
elif [[ $arg1 == '--all-in-one' || $arg1 == '-a' ]]; then
    echo "Downloading model..."
    python3 ./download-pth.py "$1" "$2"
    echo "Converting PTH to GGML..."
    python3 ./convert-pth-to-ggml.py "$1/$2" 1
    echo "Quantizing model..."
    # Quantize every f16 model file that does not already have a q4_0 counterpart
    for i in "$1/$2"/ggml-model-f16.bin*; do
        if [ -f "${i/f16/q4_0}" ]; then
            echo "Skip model quantization, it already exists: ${i/f16/q4_0}"
        else
            echo "Quantizing $i into ${i/f16/q4_0}..."
            ./quantize "$i" "${i/f16/q4_0}" 2
        fi
    done
else
    echo "Unknown command: $arg1"
    echo "Available commands:"
    echo "  --run (-r): Run a model previously converted into ggml"
    echo "      ex: -m /models/7B/ggml-model-q4_0.bin -p \"Building a website can be done in 10 simple steps:\" -n 512"
    echo "  --convert (-c): Convert a llama model into ggml"
    echo "      ex: \"/models/7B/\" 1"
    echo "  --quantize (-q): Optimize a ggml model with quantization"
    echo "      ex: \"/models/7B/ggml-model-f16.bin\" \"/models/7B/ggml-model-q4_0.bin\" 2"
    echo "  --download (-d): Download the original llama model from the CDN: https://agi.gpt4.org/llama/"
    echo "      ex: \"/models/\" 7B"
    echo "  --all-in-one (-a): Execute --download, --convert & --quantize"
    echo "      ex: \"/models/\" 7B"
fi
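
For reference, example invocations of each mode, matching the help text above (the script filename tools.sh is an assumption; this page does not show where the file lives):

# "tools.sh" is a placeholder name for this script
./tools.sh --download "/models/" 7B
./tools.sh --convert "/models/7B/" 1
./tools.sh --quantize "/models/7B/ggml-model-f16.bin" "/models/7B/ggml-model-q4_0.bin" 2
./tools.sh --run -m /models/7B/ggml-model-q4_0.bin -p "Building a website can be done in 10 simple steps:" -n 512
./tools.sh --all-in-one "/models/" 7B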