# mirror of https://github.com/ggerganov/llama.cpp.git
# synced 2024-12-27 20:04:35 +00:00 — commit 7cc2d2c889
# "ggml : move AMX to the CPU backend" (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
# llava: helper library for vision-language models (CLIP image encoder glue
# plus the llava embedding helpers). Built as an OBJECT library so the same
# object files can be wrapped into the static and (optional) shared library
# targets defined below.
add_library(llava OBJECT
            llava.cpp
            llava.h
            clip.cpp
            clip.h
            )

target_link_libraries(llava PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})

# Consumers need this directory, the repository root, and common/ on their
# include path. A single call with explicit ${CMAKE_CURRENT_SOURCE_DIR}-based
# paths replaces the original three calls that used bare relative paths.
target_include_directories(llava PUBLIC
    ${CMAKE_CURRENT_SOURCE_DIR}
    ${CMAKE_CURRENT_SOURCE_DIR}/../..
    ${CMAKE_CURRENT_SOURCE_DIR}/../../common
    )

target_compile_features(llava PRIVATE cxx_std_17)
# Static archive assembled from the shared llava object files.
add_library(llava_static STATIC $<TARGET_OBJECTS:llava>)

if (BUILD_SHARED_LIBS)
    # The object files end up inside a shared library, so they must be
    # position-independent and must carry the llama DLL export annotations.
    set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_compile_definitions(llava PRIVATE LLAMA_SHARED LLAMA_BUILD)
    add_library(llava_shared SHARED $<TARGET_OBJECTS:llava>)
    target_link_libraries(llava_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
    install(TARGETS llava_shared LIBRARY)
endif()
if (NOT MSVC)
    # The vendored stb_image.h casts away const internally; suppress the
    # warning here instead of patching the third-party header.
    target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h
endif()
# If the build-info generation target exists, make sure it runs before
# llava is compiled (ordering only — no link dependency is implied).
if (TARGET BUILD_INFO)
    add_dependencies(llava BUILD_INFO)
endif()
# llava-cli: example executable for running llava models from the shell.
set(TARGET llama-llava-cli)
add_executable(${TARGET} llava-cli.cpp)
# NOTE: the former OUTPUT_NAME override was removed — it set the output name
# to the target's own name, which is already the default.
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
# minicpmv-cli: example executable for running MiniCPM-V models.
set(TARGET llama-minicpmv-cli)
add_executable(${TARGET} minicpmv-cli.cpp)
# NOTE: the former OUTPUT_NAME override was removed — it matched the target
# name, which is already the default output name.
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)