tests : minor bash stuff (#6902)
* tests : minor bash stuff
  ggml-ci
* llama : fix build
  ggml-ci
* tests : fix CUR_DIR -> ROOT_DIR
  ggml-ci
* tests : fix fname
  ggml-ci
parent 1966eb2615
commit aa750c1ede
ci/run.sh

@@ -161,6 +161,7 @@ function gg_run_test_scripts_debug {
     set -e
 
     (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-debug/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
 
     set +e
 }

@@ -184,6 +185,7 @@ function gg_run_test_scripts_release {
     set -e
 
     (cd ./examples/gguf-split && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
+    (cd ./examples/quantize && time bash tests.sh "$SRC/build-ci-release/bin" "$MNT/models") 2>&1 | tee -a $OUT/${ci}-scripts.log
 
     set +e
 }
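For reference, the example test scripts invoked here take the binary directory and a work/model directory as positional arguments; the sketch below mirrors the CI calls with placeholder paths (BUILD_BIN and SCRATCH_DIR are illustrative names, not variables from ci/run.sh):

    # Run the example test scripts by hand, mirroring the CI loop above.
    # Pass an absolute binary path: the scripts cd into their own work area.
    BUILD_BIN=$(realpath ./build/bin)   # placeholder: dir containing main, quantize, gguf-split
    SCRATCH_DIR=/tmp/llama-tests        # placeholder: stands in for the CI's "$MNT/models"

    mkdir -p "$SCRATCH_DIR"
    (cd ./examples/gguf-split && bash tests.sh "$BUILD_BIN" "$SCRATCH_DIR")
    (cd ./examples/quantize   && bash tests.sh "$BUILD_BIN" "$SCRATCH_DIR")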
examples/gguf-split/tests.sh (6 changed lines, Normal file → Executable file)

@@ -21,7 +21,7 @@ set -x
 SPLIT=$1/gguf-split
 MAIN=$1/main
 WORK_PATH=$TMP_DIR/gguf-split
-CUR_DIR=$(pwd)
+ROOT_DIR=$(realpath $(dirname $0)/../../)
 
 mkdir -p "$WORK_PATH"
 
@@ -30,8 +30,8 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-merge*.gguf
 
 # 1. Get a model
 (
   cd $WORK_PATH
-  "$CUR_DIR"/../../scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
+  "$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
 )
 echo PASS
 
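The CUR_DIR -> ROOT_DIR fix amounts to anchoring paths on the script file instead of on the caller's working directory; a minimal sketch of the two approaches (extra quoting added for illustration, WORK_PATH as defined in the hunk above):

    # Before: $(pwd) records wherever the caller started the script, so
    # "$CUR_DIR"/../../scripts/hf.sh only resolves when the caller's working
    # directory sits two levels below the repository root (e.g. examples/gguf-split).
    CUR_DIR=$(pwd)

    # After: derive the repository root from the script's own location once;
    # the later cd into the scratch directory no longer affects it.
    ROOT_DIR=$(realpath "$(dirname "$0")"/../..)
    (
        cd "$WORK_PATH"
        "$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
    )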
examples/quantize/tests.sh

@@ -22,7 +22,7 @@ SPLIT=$1/gguf-split
 QUANTIZE=$1/quantize
 MAIN=$1/main
 WORK_PATH=$TMP_DIR/quantize
-CUR_DIR=$(pwd)
+ROOT_DIR=$(realpath $(dirname $0)/../../)
 
 mkdir -p "$WORK_PATH"
 
@@ -31,8 +31,8 @@ rm -f $WORK_PATH/ggml-model-split*.gguf $WORK_PATH/ggml-model-requant*.gguf
 
 # 1. Get a model
 (
   cd $WORK_PATH
-  "$CUR_DIR"/../../scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
+  "$ROOT_DIR"/scripts/hf.sh --repo ggml-org/gemma-1.1-2b-it-Q8_0-GGUF --file gemma-1.1-2b-it.Q8_0.gguf
 )
 echo PASS
 
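The file-mode change noted above (Normal file → Executable file for examples/gguf-split/tests.sh, and presumably the matching quantize script) corresponds to setting the executable bit; a short sketch, assuming the scripts carry a #!/bin/bash shebang and using a placeholder build path:

    # Equivalent of the Normal file -> Executable file mode change:
    chmod +x examples/gguf-split/tests.sh examples/quantize/tests.sh

    # Direct invocation; the binary path is made absolute because the scripts
    # cd into their work directory before using it.
    (cd examples/quantize && ./tests.sh "$(realpath ../../build/bin)" /tmp/llama-tests)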
@@ -9,4 +9,3 @@ then
 else
   behave "$@"
 fi
-
llama.cpp

@@ -14574,7 +14574,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             fout.close();
         }
     };
-    auto new_ofstream = [&](int index = 0) {
+    auto new_ofstream = [&](int index) {
         cur_split = index;
         GGML_ASSERT(ctx_outs[cur_split] && "Find uninitialized gguf_context");
         std::string fname = fname_out;

@@ -14592,7 +14592,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     };
 
     const auto tn = LLM_TN(model.arch);
-    new_ofstream();
+    new_ofstream(0);
     for (int i = 0; i < ml.n_tensors; ++i) {
         auto weight = ml.get_weight(i);
         struct ggml_tensor * tensor = weight->tensor;