Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-29 12:54:35 +00:00)
Commit 6c5bc0625f

server : (refactoring) reduce usage of json internally

* move all response types to struct
* wip [no ci]
* many fixes
* add virtual function
* fix index
* minor style fix
* add std::move
* refactor handle_completions_generic
* add virtual functions
* remove server.hpp
* clarify server_sent_event RFC specs
* apply review comments
* fix model_alias and completion_probabilities
* small clean up
* remove virtual for to_json_oai_compat()
* naming oai_compat --> oaicompat
* fix unwanted recursive call
* update docs
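The commit mentions clarifying the server_sent_event RFC specs. As a rough illustration only (not taken from this commit), a streaming completion from a running llama-server arrives as standard SSE framing, i.e. "data: {...}" lines each followed by a blank line; the host, port, and payload fields below are assumptions based on the server's documented /completion API:

# Sketch only: stream a completion over SSE from a server assumed to be
# listening on localhost:8080; the payload follows the documented /completion API
curl --no-buffer -X POST http://localhost:8080/completion \
  -H "Content-Type: application/json" \
  -d '{"prompt": "Hello", "n_predict": 16, "stream": true}'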
15 lines | 222 B | Bash | Executable File
#!/bin/bash

# make sure we are in the right directory
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
cd $SCRIPT_DIR

set -eu

if [ $# -lt 1 ]
then
  pytest -v -x
else
  pytest "$@"
fi
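Assuming the script above is saved as tests.sh in the server test directory (the file name is not shown in this snippet), it either runs the whole pytest suite or forwards its arguments to pytest unchanged:

# No arguments: run the full suite, verbose, stopping at the first failure
./tests.sh

# Any arguments are passed straight to pytest, e.g. a -k filter
# (the filter expression below is only an illustration)
./tests.sh -k "completion" -v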