Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-24 10:24:35 +00:00)
Commit 7e4ea5beff:
* Added httplib support
* Added a README for the server example
* Fixed some bugs
* Fixed the build error on MacBook
* Changed json11 to nlohmann-json
* Removed some whitespace
* Removed trailing whitespace
* Added support for custom prompts and more functions
* Some corrections; the server example is now added as a CMake option (see the sketch below)
44 lines · 849 B · CMake
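The last commit item refers to the LLAMA_BUILD_SERVER gate used near the end of the file below. A minimal sketch of how such an option is typically declared in the top-level CMakeLists.txt; the option name comes from this file, but the description string and default value are assumptions rather than the repository's actual declaration:

# Hypothetical top-level declaration (not taken from the repository):
# exposes the server example as an opt-in build, which the examples
# CMakeLists.txt below then checks via if(LLAMA_BUILD_SERVER).
option(LLAMA_BUILD_SERVER "llama: build the server example" OFF)

With a declaration like this, the server example would be enabled with a standard CMake cache override such as -DLLAMA_BUILD_SERVER=ON at configure time.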
# dependencies

find_package(Threads REQUIRED)

# third-party

# ...

# common

set(TARGET common)

add_library(${TARGET} OBJECT
    common.h
    common.cpp
    )

if (BUILD_SHARED_LIBS)
    set_target_properties(${TARGET} PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()

target_include_directories(${TARGET} PUBLIC .)
target_compile_features(${TARGET} PUBLIC cxx_std_11)
target_link_libraries(${TARGET} PRIVATE llama)

# examples

include_directories(${CMAKE_CURRENT_SOURCE_DIR})

if (EMSCRIPTEN)
else()
    add_subdirectory(main)
    add_subdirectory(quantize)
    add_subdirectory(quantize-stats)
    add_subdirectory(perplexity)
    add_subdirectory(embedding)
    add_subdirectory(save-load-state)
    add_subdirectory(benchmark)
    add_subdirectory(baby-llama)
    if(LLAMA_BUILD_SERVER)
        add_subdirectory(server)
    endif()
endif()
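Each subdirectory registered above provides its own small CMakeLists.txt that builds one executable against the common OBJECT library defined here and the llama target from the top-level project. A minimal sketch of that per-example pattern, assuming a hypothetical example named my-example with a single source file; the actual examples may differ in sources and extra flags:

# Hypothetical examples/my-example/CMakeLists.txt following the pattern above.
set(TARGET my-example)
add_executable(${TARGET} my-example.cpp)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

Because common is an OBJECT library, linking it here simply adds its compiled objects to the example executable rather than producing a separate static or shared library.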