# examples/llava/CMakeLists.txt — from a mirror of
# https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 19:34:35 +00:00,
# commit ba1cb19cdd).
# Commit message (Qwen2VL support):
# * Barebone Qwen2VL LLM converter
# * Add Qwen2VL cli entrypoint
# * [WIP] add qwen2vl arch
# * Verify m-rope output
# * Add vl-rope/2d-rope support for qwen2vl ViT
# * update qwen2vl cli tool
# * update 5D tensor op workaround
# * [WIP] qwen2vl vision model
# * make batch and clip utils compatible with qwen2vl
# * [WIP] create inference workflow, gguf convert script but fix
# * correct vision-rope behavior, add the missing last layer back to ViT
# * add arg parser to qwen2vl_surgery
# * replace variable-size array with vector
# * cuda-gdb cmake preset
# * add fp32 mrope, vision rope kernel
# * add fp16 support for qwen2vl and m-rope
# * add `GGML_ROPE_TYPE_MROPE`, `GGML_ROPE_TYPE_VISION`
# * fix rope op mode switching, outdated func args
# * update `llama_hparams`
# * update to keep up with upstream changes
# * resolve linter, test errors
# * add makefile entry, update special image padding token
# * add mrope unit test, fix a few compiler warnings
# * rename `mrope`-related functions, params
# * minor updates on debug util, bug fixes
# * add `m-rope` testcase to `test-backend-ops`
# * Apply suggestions from code review
# * fix trailing whitespace
# * store `llama_hparams.rope_sections` with fixed-size array
# * update position id tensor size check in GGML_OP_ROPE
# * minor updates
# * update `ggml_backend_*_supports_op` of unsupported backends
# * remove old `rope_section` compare operator
# Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
# 53 lines, 1.9 KiB, CMake
# Build the LLaVA multimodal (vision) support code as an OBJECT library so the
# compiled objects can be reused by the static and shared variants below
# without recompiling.
add_library(llava OBJECT
            llava.cpp
            llava.h
            clip.cpp
            clip.h
            )
# llava builds against ggml/llama and the platform thread library.
# NOTE(review): ${CMAKE_THREAD_LIBS_INIT} presumably comes from a
# find_package(Threads) in a parent CMakeLists — confirm before porting this
# file elsewhere; Threads::Threads would be the modern target-based form.
target_link_libraries(llava PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})

# Consumers of llava need its own headers, the repository root, and common/.
target_include_directories(llava PUBLIC . ../.. ../../common)

target_compile_features(llava PRIVATE cxx_std_17)
# Static archive assembled from the already-compiled llava objects.
add_library(llava_static STATIC $<TARGET_OBJECTS:llava>)
# When shared libraries are requested, also produce an installable shared
# llava library from the same object files.
if (BUILD_SHARED_LIBS)
    # Objects going into a shared library must be position independent.
    set_target_properties(llava PROPERTIES POSITION_INDEPENDENT_CODE ON)
    # LLAMA_SHARED/LLAMA_BUILD select the DLL export annotations when the
    # objects are compiled for the shared variant.
    target_compile_definitions(llava PRIVATE LLAMA_SHARED LLAMA_BUILD)
    add_library(llava_shared SHARED $<TARGET_OBJECTS:llava>)
    target_link_libraries(llava_shared PRIVATE ggml llama ${CMAKE_THREAD_LIBS_INIT})
    install(TARGETS llava_shared LIBRARY)
endif()
# stb_image.h triggers cast-qual warnings on GCC/Clang; silence them for this
# target only (the flag is not understood by MSVC).
if (NOT MSVC)
    target_compile_options(llava PRIVATE -Wno-cast-qual) # stb_image.h
endif()
# If the repository defines a BUILD_INFO target (generated build metadata),
# make sure it is built before llava. add_dependencies only orders the build;
# it does not link anything.
if(TARGET BUILD_INFO)
    add_dependencies(llava BUILD_INFO)
endif()
# llama-llava-cli: command-line demo for LLaVA models.
set(TARGET llama-llava-cli)
add_executable(${TARGET} llava-cli.cpp)
# OUTPUT_NAME matches the target name; kept for consistency with the other
# CLI targets below (it is a no-op here).
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
# llama-minicpmv-cli: command-line demo for MiniCPM-V models.
set(TARGET llama-minicpmv-cli)
add_executable(${TARGET} minicpmv-cli.cpp)
# OUTPUT_NAME matches the target name; kept for consistency (no-op).
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-minicpmv-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)
# llama-qwen2vl-cli: command-line demo for Qwen2-VL models (M-RoPE).
set(TARGET llama-qwen2vl-cli)
add_executable(${TARGET} qwen2vl-cli.cpp)
# OUTPUT_NAME matches the target name; kept for consistency (no-op).
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-qwen2vl-cli)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_17)