mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 03:14:35 +00:00)
commit 0bf2d10c55

* server : add "tokens" output ggml-ci
* server : output embeddings for all tokens when pooling = none ggml-ci
* server : be explicit about the pooling type in the tests ggml-ci
* server : do not normalize embeddings when there is no pooling ggml-ci
* llama : add OuteTTS support (wip)
* wip
* extract features
* first conv
* group norm
* resnet conv
* resnet
* attn
* pos net
* layer norm
* convnext
* head
* hann window
* fix n_embd + remove llama.cpp hacks
* compute hann window
* fft
* spectrum processing
* clean-up
* tts : receive input text and generate codes
* clip : fix new conv name
* tts : minor fix
* tts : add header + minor fixes ggml-ci
* tts : add mathematical constant ggml-ci
* tts : fix sampling + cut initial noise
* tts : fixes
* tts : update default samplers ggml-ci
* tts : text pre-processing
* tts : outetts-voc -> wavtokenizer-dec
* tts : remove hardcoded constants ggml-ci
* tts : fix tensor shapes
* llama : refactor wavtokenizer tensors ggml-ci
* cont ggml-ci
* cont [no ci]
* llama : update WavTokenizer to non-causal attn
* llama : handle no-vocab detokenization
* tts : add Python example for OuteTTS (wip)
* tts : extend python example to generate spectrogram ggml-ci
* server : fix rebase artifacts
* tts : enable "return_tokens" in Python example ggml-ci
* tts : minor fixes
* common : support HF download for vocoder
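The embedding changes above are easiest to see end to end: with pooling disabled, the server returns one raw vector per input token instead of a single pooled, normalized vector. Below is a minimal Python sketch, assuming a local llama-server started with an embedding-capable model and --pooling none; the port, the /embedding endpoint, and the response layout are assumptions for illustration, not verified against this exact commit.

import json
import urllib.request

# Assumes a server started roughly as:
#   llama-server -m model.gguf --embedding --pooling none
# Host/port and the /embedding endpoint are assumptions for illustration.
URL = "http://127.0.0.1:8080/embedding"

payload = json.dumps({"content": "Hello world"}).encode("utf-8")
req = urllib.request.Request(
    URL,
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    result = json.load(resp)

# With pooling = none, expect per-token embeddings that are NOT normalized
# (the commit disables normalization when there is no pooling). The exact
# JSON layout may differ between versions, so inspect the raw result.
print(json.dumps(result)[:300])

Note that if you want cosine similarity over these per-token vectors, you now normalize client-side, since the server no longer normalizes when there is no pooling.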
74 lines · 1.9 KiB · CMake
# dependencies

find_package(Threads REQUIRED)

# third-party

# ...

# flags
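# apply the project-wide compile flags (defined in the repository's cmake helpers)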
llama_add_compile_flags()

# examples

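# add this directory to the include path so every example can pick up shared headers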
include_directories(${CMAKE_CURRENT_SOURCE_DIR})
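
# no example binaries are built for Emscripten; all examples below are native-only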
if (EMSCRIPTEN)
else()
    add_subdirectory(batched-bench)
    add_subdirectory(batched)
    add_subdirectory(embedding)
    add_subdirectory(eval-callback)

    if (NOT WIN32)
        # disabled on Windows because it uses internal functions not exported with LLAMA_API
        add_subdirectory(gbnf-validator)
    endif()

    add_subdirectory(gguf-hash)
    add_subdirectory(gguf-split)
    add_subdirectory(gguf)
    add_subdirectory(gritlm)
    add_subdirectory(imatrix)
    add_subdirectory(infill)
    add_subdirectory(llama-bench)
    add_subdirectory(lookahead)
    add_subdirectory(lookup)
    add_subdirectory(main)
    add_subdirectory(parallel)
    add_subdirectory(passkey)
    add_subdirectory(perplexity)
    add_subdirectory(quantize)
    add_subdirectory(retrieval)
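    # the HTTP server example is optional; it is gated behind LLAMA_BUILD_SERVER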
    if (LLAMA_BUILD_SERVER)
        add_subdirectory(server)
    endif()
    add_subdirectory(save-load-state)
    add_subdirectory(run)
    add_subdirectory(simple)
    add_subdirectory(simple-chat)
    add_subdirectory(speculative)
    add_subdirectory(speculative-simple)
    add_subdirectory(tokenize)
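    # text-to-speech example (OuteTTS + WavTokenizer decoder) introduced by this commit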
    add_subdirectory(tts)
    add_subdirectory(gen-docs)

    if (NOT GGML_BACKEND_DL)
        # these examples use the backends directly and cannot be built with dynamic loading
        add_subdirectory(convert-llama2c-to-ggml)
        add_subdirectory(cvector-generator)
        add_subdirectory(export-lora)

        if (NOT WIN32)
            # disabled on Windows because it uses internal functions not exported with LLAMA_API
            add_subdirectory(quantize-stats)
        endif()

        add_subdirectory(llava)

        if (GGML_RPC)
            add_subdirectory(rpc)
        endif()

        if (GGML_SYCL)
            add_subdirectory(sycl)
        endif()
    endif()
endif()
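
# A typical out-of-tree configure/build from the repository root might look like
# this (illustrative commands; option defaults live in the top-level CMakeLists.txt):
#   cmake -B build -DLLAMA_BUILD_SERVER=ON
#   cmake --build build --config Release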