Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-30 13:24:35 +00:00
Commit 413e7b0559:

* scripts : add lib.sh and lib_test.sh
* scripts : stub out new ci-run.sh script
* scripts : switch to PascalCase for functions. This looks a little odd at first, but I find it very useful as a convention to know if a command is part of our code vs a builtin.
* scripts : add some fancy conversion from snake_case to PascalCase
* Add venv to ci/run.sh
* Revert scripts work
* scripts : add wrapper script for local use of ci/run.sh
* Simplify .gitignore for tests, clang-tidy fixes
* Label all ctest tests
* ci : ctest uses -L main
* Attempt at writing ctest_with_model
* Update test-model-load-cancel
* ci : add ctest_with_model for debug and release (ggml-ci)
* Fix gg_get_model function (ggml-ci)
* got stuck on CMake
* Add get_model.cpp to tests/CMakeLists.txt (ggml-ci)
* Fix README.md output for ctest_with_model (ggml-ci)
* workflows : use `-L main` for all ctest (ggml-ci)
* Fixes
* GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE
* Always show warning rather than failing if model file variable is not set
* scripts : update usage text for ci-run.sh
68 lines
3.9 KiB
CMake
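The "main" and "model" labels assigned in this file are what the CI filters on. A minimal local run of the fast tests (a sketch, assuming a build directory that has already been configured) is:

    ctest -L main

Tests carrying the "model" label additionally need a model file; per the commit message, the model-file variable produces a warning rather than a failure when it is unset.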
# Helper: build a test binary from <source> plus get-model.cpp and link it
# against the common library.
function(llama_build_executable source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source} get-model.cpp)
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE common)
endfunction()

# Helper: register an already-built binary as a ctest test named <name>,
# passing any extra arguments through, and label it "main".
function(llama_test_executable name source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
    set_property(TEST ${name} PROPERTY LABELS "main")
endfunction()

# Helper: build and register a test in one step, under the default "main" label.
function(llama_build_and_test_executable source)
    llama_build_and_test_executable_with_label(${source} "main")
endfunction()

# Helper: build and register a test with an explicit ctest label.
function(llama_build_and_test_executable_with_label source label)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source} get-model.cpp)
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE common)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
    set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${label})
endfunction()
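# A sketch of how the helpers above compose: build a driver once, then register
# it under several test names with different arguments. The source file and
# vocab names here are hypothetical:
#
#   llama_build_executable(test-foo.cpp)
#   llama_test_executable (test-foo-aaa test-foo.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aaa.gguf)
#   llama_test_executable (test-foo-bbb test-foo.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bbb.gguf)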
# llama_build_and_test_executable(test-double-float.cpp) # SLOW
llama_build_and_test_executable(test-quantize-fns.cpp)
llama_build_and_test_executable(test-quantize-perf.cpp)
llama_build_and_test_executable(test-sampling.cpp)

# tokenizer tests: build each driver once, then run it against several vocabularies
llama_build_executable(test-tokenizer-0-llama.cpp)
llama_test_executable (test-tokenizer-0-llama test-tokenizer-0-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)

llama_build_executable(test-tokenizer-0-falcon.cpp)
llama_test_executable (test-tokenizer-0-falcon test-tokenizer-0-falcon.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)

llama_build_executable(test-tokenizer-1-llama.cpp)
llama_test_executable (test-tokenizer-1-llama test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
llama_test_executable (test-tokenizer-1-baichuan test-tokenizer-1-llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)

llama_build_executable(test-tokenizer-1-bpe.cpp)
llama_test_executable (test-tokenizer-1-falcon test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
llama_test_executable (test-tokenizer-1-aquila test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
llama_test_executable (test-tokenizer-1-mpt test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
llama_test_executable (test-tokenizer-1-stablelm-3b-4e1t test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm-3b-4e1t.gguf)
llama_test_executable (test-tokenizer-1-gpt-neox test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
llama_test_executable (test-tokenizer-1-refact test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
llama_test_executable (test-tokenizer-1-starcoder test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
llama_test_executable (test-tokenizer-1-gpt2 test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
# llama_test_executable (test-tokenizer-1-bloom test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf) # BIG
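# Extending coverage to another BPE vocabulary is a one-line addition that
# reuses the same driver binary (vocab file hypothetical):
#   llama_test_executable (test-tokenizer-1-foo test-tokenizer-1-bpe.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-foo.gguf)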
llama_build_and_test_executable(test-grammar-parser.cpp)
llama_build_and_test_executable(test-llama-grammar.cpp)
llama_build_and_test_executable(test-grad0.cpp)
# llama_build_and_test_executable(test-opt.cpp) # SLOW
llama_build_and_test_executable(test-backend-ops.cpp)

llama_build_and_test_executable(test-rope.cpp)

# these tests need a model file, so they are labelled "model" and excluded
# from the default "-L main" ctest runs
llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
llama_build_and_test_executable_with_label(test-autorelease.cpp "model")
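# A sketch of how the "model" label is meant to be consumed, per the commit
# message above (the model path is hypothetical):
#   LLAMACPP_TESTMODELFILE=/path/to/model.gguf ctest -L model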
# dummy executable - not installed; compiles test-c.c as plain C against the
# llama library to check that the public header remains valid C
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
target_link_libraries(${TEST_TARGET} PRIVATE llama)