mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-28 04:14:35 +00:00
e0429d38e4
* convert-new.py : output gguf (WIP) * convert-new.py : add gguf key-value pairs * llama : add hparams.ctx_train + no longer print ftype * convert-new.py : minor fixes * convert-new.py : vocab-only option should work now * llama : fix tokenizer to use llama_char_to_byte * tests : add new ggml-vocab-llama.gguf * convert-new.py : tensor name mapping * convert-new.py : add map for skipping tensor serialization * convert-new.py : convert script now works * gguf.py : pick some of the refactoring from #2644 * convert-new.py : minor fixes
36 lines
1.8 KiB
CMake
36 lines
1.8 KiB
CMake
# llama_build_executable(<source>)
#
# Builds an executable target from a single <source> file. The target
# name is the source file name with its extension stripped (NAME_WE),
# e.g. test-tokenizer-0.cpp -> test-tokenizer-0. The binary is marked
# for installation and linked privately against the llama library.
# Does NOT register a CTest test; pair with llama_test_executable().
function(llama_build_executable source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
endfunction()
# llama_test_executable(<name> <source> [args...])
#
# Registers a CTest test <name> that runs the executable previously
# built from <source> via llama_build_executable() (the target name is
# derived the same way, by stripping the extension). Any extra
# arguments are forwarded to the test binary, which lets several tests
# with different arguments (e.g. different vocab files) share one binary.
function(llama_test_executable name source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()
# llama_build_and_test_executable(<source> [args...])
#
# Convenience wrapper for the common case of one test per binary:
# builds, installs, and links an executable from <source> (same steps
# as llama_build_executable) and registers it as a CTest test of the
# same name, forwarding any extra arguments to the binary.
function(llama_build_and_test_executable source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    install(TARGETS ${TEST_TARGET} RUNTIME)
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()
# Test registrations. Entries commented out with "# SLOW" are
# intentionally disabled because they take too long for routine runs.
# llama_build_and_test_executable(test-double-float.cpp) # SLOW
llama_build_and_test_executable(test-quantize-fns.cpp)
llama_build_and_test_executable(test-quantize-perf.cpp)
llama_build_and_test_executable(test-sampling.cpp)

# Tokenizer tests: each binary is built once and invoked per vocab,
# with the GGUF vocab model file passed as a command-line argument.
llama_build_executable(test-tokenizer-0.cpp)
llama_test_executable(test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
llama_build_executable(test-tokenizer-1.cpp)
llama_test_executable(test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
# Disabled: requires the aquila vocab model, which is not shipped here.
#llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)

# grammar-parser.cpp lives in examples/ and is compiled directly into
# the test binary as a second source file.
llama_build_and_test_executable(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp)
llama_build_and_test_executable(test-grad0.cpp) # SLOW
# llama_build_and_test_executable(test-opt.cpp) # SLOW