mirror of https://github.com/ggerganov/llama.cpp.git
6381d4e110
* gguf : first API pass
* gguf : read header + meta data
* gguf : read tensor info
* gguf : initial model loading - not tested
* gguf : add gguf_get_tensor_name()
* gguf : do not support passing existing ggml_context to gguf_init
* gguf : simplify gguf_get_val
* gguf : gguf.c is now part of ggml.c
* gguf : read / write sample models
* gguf : add comments
* refactor : reduce code duplication and better API (#2415)
* gguf : expose the gguf_type enum through the API for now
* gguf : add array support
* gguf.py : some code style changes
* convert.py : start a new simplified implementation by removing old stuff
* convert.py : remove GGML vocab + other obsolete stuff
* GGUF : write tensor (#2426)
* WIP: Write tensor
* GGUF : Support writing tensors in Python
* refactor : rm unused import and upd todos
* fix : fix errors upd writing example
* rm example.gguf
* gitignore *.gguf
* undo formatting
* gguf : add gguf_find_key (#2438)
* gguf.cpp : find key example
* ggml.h : add gguf_find_key
* ggml.c : add gguf_find_key
* gguf : fix writing tensors
* gguf : do not hardcode tensor names to read
* gguf : write sample tensors to read
* gguf : add tokenization constants
* quick and dirty conversion example
* gguf : fix writing gguf arrays
* gguf : write tensors one by one and code reuse
* gguf : fix writing gguf arrays
* gguf : write tensors one by one
* gguf : write tensors one by one
* gguf : write tokenizer data
* gguf : upd gguf conversion script
* Update convert-llama-h5-to-gguf.py
* gguf : handle already encoded string
* ggml.h : get array str and f32
* ggml.c : get arr str and f32
* gguf.py : support any type
* Update convert-llama-h5-to-gguf.py
* gguf : fix set is not subscriptable
* gguf : update convert-llama-h5-to-gguf.py
* constants.py : add layer norm eps
* gguf.py : add layer norm eps and merges
* ggml.h : increase GGML_MAX_NAME to 64
* ggml.c : add gguf_get_arr_n
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Makefile : add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* add gptneox gguf example
* Update convert-llama-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-gptneox-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* gguf : support custom alignment value
* gguf : fix typo in function call
* gguf : mmap tensor data example
* fix : update convert-llama-h5-to-gguf.py
* Update convert-llama-h5-to-gguf.py
* convert-gptneox-h5-to-gguf.py : Special tokens
* gptneox-main.cpp : special tokens
* Update gptneox-main.cpp
* constants.py : special tokens
* gguf.py : accumulate kv and tensor info data + special tokens
* convert-gptneox-h5-to-gguf.py : accumulate kv and ti + special tokens
* gguf : gguf counterpart of llama-util.h
* gguf-util.h : update note
* convert-llama-h5-to-gguf.py : accumulate kv / ti + special tokens
* convert-llama-h5-to-gguf.py : special tokens
* Delete gptneox-common.cpp
* Delete gptneox-common.h
* convert-gptneox-h5-to-gguf.py : gpt2bpe tokenizer
* gptneox-main.cpp : gpt2 bpe tokenizer
* gpt2 bpe tokenizer (handles merges and unicode)
* Makefile : remove gptneox-common
* gguf.py : bytesarray for gpt2bpe tokenizer
* cmpnct_gpt2bpe.hpp : comments
* gguf.py : use custom alignment if present
* gguf : minor stuff
* Update gptneox-main.cpp
* map tensor names
* convert-gptneox-h5-to-gguf.py : map tensor names
* convert-llama-h5-to-gguf.py : map tensor names
* gptneox-main.cpp : map tensor names
* gguf : start implementing libllama in GGUF (WIP)
* gguf : start implementing libllama in GGUF (WIP)
* rm binary committed by mistake
* upd .gitignore
* gguf : calculate n_mult
* gguf : inference with 7B model working (WIP)
* gguf : rm deprecated function
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : start implementing gguf_file_saver (WIP)
* gguf : add gguf_get_kv_type
* gguf : add gguf_get_kv_type
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver (WIP)
* gguf : write metadata in gguf_file_saver
* gguf : rm references to old file formats
* gguf : shorter name for member variable
* gguf : rm redundant method
* gguf : get rid of n_mult, read n_ff from file
* Update gguf_tensor_map.py
* Update gptneox-main.cpp
* gguf : rm references to old file magics
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : start implementing quantization (WIP)
* gguf : quantization is working
* gguf : proper closing of file
* gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : no need to convert tensors twice
* convert-llama-h5-to-gguf.py : no need to convert tensors twice
* convert-gptneox-h5-to-gguf.py : simplify nbytes
* convert-llama-h5-to-gguf.py : simplify nbytes
* gptneox-main.cpp : n_layer --> n_block
* constants.py : n_layer --> n_block
* gguf.py : n_layer --> n_block
* convert-gptneox-h5-to-gguf.py : n_layer --> n_block
* convert-llama-h5-to-gguf.py : n_layer --> n_block
* gptneox-main.cpp : n_layer --> n_block
* Update gguf_tensor_map.py
* convert-gptneox-h5-to-gguf.py : load model in parts to save memory
* convert-llama-h5-to-gguf.py : load model in parts to save memory
* convert : write more metadata for LLaMA
* convert : rm quantization version
* convert-gptneox-h5-to-gguf.py : add file_type key
* gptneox-main.cpp : add file_type key
* fix conflicts
* gguf : add todos and comments
* convert-gptneox-h5-to-gguf.py : tensor name map changes
* Create gguf_namemap.py : tensor name map changes
* Delete gguf_tensor_map.py
* gptneox-main.cpp : tensor name map changes
* convert-llama-h5-to-gguf.py : fixes
* gguf.py : don't add empty strings
* simple : minor style changes
* gguf : use UNIX line ending
* Create convert-llama-7b-pth-to-gguf.py
* llama : sync gguf-llama.cpp with latest llama.cpp (#2608)
* llama : sync gguf-llama.cpp with latest llama.cpp
* minor : indentation + assert
* llama : refactor gguf_buffer and gguf_ctx_buffer
* llama : minor
* gitignore : add gptneox-main
* llama : tokenizer fixes (#2549)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* convert : update convert-new.py with tokenizer fixes (#2614)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* llama : sync gguf-llama with llama (#2613)
* llama : sync gguf-llama with llama
* tests : fix build + warnings (test-tokenizer-1 still fails)
* tests : fix wstring_convert
* convert : fix layer names
* llama : sync gguf-llama.cpp
* convert : update HF converter to new tokenizer voodoo magics
* llama : update tokenizer style
* convert-llama-h5-to-gguf.py : add token types
* constants.py : add token types
* gguf.py : add token types
* convert-llama-7b-pth-to-gguf.py : add token types
* gguf-llama.cpp : fix n_head_kv
* convert-llama-h5-to-gguf.py : add 70b gqa support
* gguf.py : add tensor data layout
* convert-llama-h5-to-gguf.py : add tensor data layout
* convert-llama-7b-pth-to-gguf.py : add tensor data layout
* gptneox-main.cpp : add tensor data layout
* convert-llama-h5-to-gguf.py : clarify the reverse permute
* llama : refactor model loading code (#2620)
* llama : style formatting + remove helper methods
* llama : fix quantization using gguf tool
* llama : simplify gguf_file_saver
* llama : fix method names
* llama : simplify write_header()
* llama : no need to pass full file loader to the file saver
just gguf_ctx
* llama : gguf_file_saver write I32
* llama : refactor tensor names (#2622)
* gguf: update tensor names searched in quantization
* gguf : define tensor names as constants
* gguf : initial write API (not tested yet)
* gguf : write to file API (not tested)
* gguf : initial write API ready + example
* gguf : fix header write
* gguf : fixes + simplify example + add ggml_nbytes_pad()
* gguf : minor
* llama : replace gguf_file_saver with new gguf write API
* gguf : streaming support when writing files
* gguf : remove obsolete write methods
* gguf : remove obsolete gguf_get_arr_xxx API
* llama : simplify gguf_file_loader
* llama : move hparams and vocab from gguf_file_loader to llama_model_loader
* llama : merge gguf-util.h in llama.cpp
* llama : reorder definitions in .cpp to match .h
* llama : minor simplifications
* llama : refactor llama_model_loader (WIP)
wip : remove ggml_ctx from llama_model_loader
wip : merge gguf_file_loader in llama_model_loader
* llama : fix shape prints
* llama : fix Windows build + fix norm_rms_eps key
* llama : throw error on missing KV pairs in model meta data
* llama : improve printing + log meta data
* llama : switch print order of meta data
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
* gguf : deduplicate (#2629)
* gguf : better type names
* dedup : CPU + Metal is working
* ggml : fix warnings about unused results
* llama.cpp : fix line feed and compiler warning
* llama : fix strncpy warning + note token_to_str does not write null
* llama : restore the original load/save session implementation
Will migrate this to GGUF in the future
* convert-llama-h5-to-gguf.py : support alt ctx param name
* ggml : assert when using ggml_mul with non-F32 src1
* examples : dedup simple
---------
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
* gguf.py : merge all files in gguf.py
* convert-new.py : pick #2427 for HF 70B support
* examples/gguf : no need to keep q option for quantization any more
* llama.cpp : print actual model size
* llama.cpp : use ggml_elements()
* convert-new.py : output gguf (#2635)
* convert-new.py : output gguf (WIP)
* convert-new.py : add gguf key-value pairs
* llama : add hparams.ctx_train + no longer print ftype
* convert-new.py : minor fixes
* convert-new.py : vocab-only option should work now
* llama : fix tokenizer to use llama_char_to_byte
* tests : add new ggml-vocab-llama.gguf
* convert-new.py : tensor name mapping
* convert-new.py : add map for skipping tensor serialization
* convert-new.py : convert script now works
* gguf.py : pick some of the refactoring from #2644
* convert-new.py : minor fixes
* convert.py : update to support GGUF output
* Revert "ci : disable CI temporary to not waste energy"
This reverts commit 7e82d25f40.
* convert.py : n_head_kv optional and .gguf file extension
* convert.py : better always have n_head_kv and default it to n_head
* llama : sync with recent PRs on master
* editorconfig : ignore models folder
ggml-ci
* ci : update ".bin" to ".gguf" extension
ggml-ci
* llama : fix llama_model_loader memory leak
* gptneox : move as a WIP example
* llama : fix lambda capture
ggml-ci
* ggml : fix bug in gguf_set_kv
ggml-ci
* common.h : .bin --> .gguf
* quantize-stats.cpp : .bin --> .gguf
* convert.py : fix HF tensor permuting / unpacking
ggml-ci
* llama.cpp : typo
* llama : throw error if gguf fails to init from file
ggml-ci
* llama : fix tensor name grepping during quantization
ggml-ci
* gguf.py : write tensors in a single pass (#2644)
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : single pass for writing tensors + refactoring writer
* gguf : style fixes in simple conversion script
* gguf : refactor gptneox conversion script
* gguf : rename h5 to hf (for HuggingFace)
* gguf : refactor pth to gguf conversion script
* gguf : rm file_type key and method
* gguf.py : fix vertical alignment
* gguf.py : indentation
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* convert-gptneox-hf-to-gguf.py : fixes
* gguf.py : gptneox mapping
* convert-llama-hf-to-gguf.py : fixes
* convert-llama-7b-pth-to-gguf.py : fixes
* ggml.h : reverse GGUF_MAGIC
* gguf.py : reverse GGUF_MAGIC
* test-tokenizer-0.cpp : fix warning
* llama.cpp : print kv general.name
* llama.cpp : get special token kv and linefeed token id
* llama : print number of tensors per type + print arch + style
* tests : update vocab file with new magic
* editorconfig : fix whitespaces
* llama : re-order functions
* llama : remove C++ API + reorganize common source in /common dir
* llama : minor API updates
* llama : avoid hardcoded special tokens
* llama : fix MPI build
ggml-ci
* llama : introduce enum llama_vocab_type + remove hardcoded string constants
* convert-falcon-hf-to-gguf.py : falcon HF --> gguf conversion, not tested
* falcon-main.cpp : falcon inference example
* convert-falcon-hf-to-gguf.py : remove extra kv
* convert-gptneox-hf-to-gguf.py : remove extra kv
* convert-llama-7b-pth-to-gguf.py : remove extra kv
* convert-llama-hf-to-gguf.py : remove extra kv
* gguf.py : fix for falcon 40b
* falcon-main.cpp : fix for falcon 40b
* convert-falcon-hf-to-gguf.py : update ref
* convert-falcon-hf-to-gguf.py : add tensor data layout
* cmpnct_gpt2bpe.hpp : fixes
* falcon-main.cpp : fixes
* gptneox-main.cpp : fixes
* cmpnct_gpt2bpe.hpp : remove non-general stuff
* Update examples/server/README.md
Co-authored-by: slaren <slarengh@gmail.com>
* cmpnct_gpt2bpe.hpp : cleanup
* convert-llama-hf-to-gguf.py : special tokens
* convert-llama-7b-pth-to-gguf.py : special tokens
* convert-permute-debug.py : permute debug print
* convert-permute-debug-master.py : permute debug for master
* convert-permute-debug.py : change permute type of attn_q
* convert.py : 70b model working (change attn_q permute)
* Delete convert-permute-debug-master.py
* Delete convert-permute-debug.py
* convert-llama-hf-to-gguf.py : fix attn_q permute
* gguf.py : fix rope scale kv
* convert-llama-hf-to-gguf.py : rope scale and added tokens
* convert-llama-7b-pth-to-gguf.py : rope scale and added tokens
* llama.cpp : use rope scale kv
* convert-llama-7b-pth-to-gguf.py : rope scale fix
* convert-llama-hf-to-gguf.py : rope scale fix
* py : fix whitespace
* gguf : add Python script to convert GGMLv3 LLaMA models to GGUF (#2682)
* First pass at converting GGMLv3 LLaMA models to GGUF
* Cleanups, better output during conversion
* Fix vocab space conversion logic
* More vocab conversion fixes
* Add description to converted GGUF files
* Improve help text, expand warning
* Allow specifying name and description for output GGUF
* Allow overriding vocab and hyperparams from original model metadata
* Use correct params override var name
* Fix wrong type size for Q8_K
Better handling of original style metadata
* Set default value for gguf add_tensor raw_shape KW arg
* llama : improve token type support (#2668)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* llama : add API for token type
ggml-ci
* tests : use new tokenizer type API (#2692)
* Merge tokenizer fixes into the gguf branch.
* Add test vocabularies
* Adapt convert-new.py (and fix a clang-cl compiler error on windows)
* Improved tokenizer test
But does it work on MacOS?
* Improve token type support
- Added @klosax code to convert.py
- Improved token type support in vocabulary
* Exclude platform dependent tests
* More sentencepiece compatibility by eliminating magic numbers
* Restored accidentally removed comment
* Improve commentary
* Use token type API in test-tokenizer-1.cpp
* py : cosmetics
* readme : add notice about new file format
ggml-ci
---------
Co-authored-by: M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>
Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: goerch <jhr.walter@t-online.de>
Co-authored-by: slaren <slarengh@gmail.com>
Co-authored-by: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
CMakeLists.txt · 605 lines · 21 KiB · CMake
cmake_minimum_required(VERSION 3.12) # Don't bump this version for no reason
project("llama.cpp" C CXX)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif()

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(LLAMA_STANDALONE ON)

    # configure project version
    # TODO
else()
    set(LLAMA_STANDALONE OFF)
endif()

if (EMSCRIPTEN)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)

    option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON)
else()
    if (MINGW)
        set(BUILD_SHARED_LIBS_DEFAULT OFF)
    else()
        set(BUILD_SHARED_LIBS_DEFAULT ON)
    endif()
endif()

#
# Option list
#

# general
option(LLAMA_STATIC "llama: static link libraries" OFF)
option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)
option(LLAMA_LTO "llama: enable link time optimization" OFF)

# debug
option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON)
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
option(LLAMA_GPROF "llama: enable gprof" OFF)

# sanitizers
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# instruction set specific
option(LLAMA_AVX "llama: enable AVX" ON)
option(LLAMA_AVX2 "llama: enable AVX2" ON)
option(LLAMA_AVX512 "llama: enable AVX512" OFF)
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
option(LLAMA_FMA "llama: enable FMA" ON)
# in MSVC F16C is implied with AVX2/AVX512
if (NOT MSVC)
    option(LLAMA_F16C "llama: enable F16C" ON)
endif()

# 3rd party libs
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
option(LLAMA_BLAS "llama: use BLAS" OFF)
set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
option(LLAMA_CUBLAS "llama: use CUDA" OFF)
#option(LLAMA_CUDA_CUBLAS "llama: use cuBLAS for prompt processing" OFF)
option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
option(LLAMA_CUDA_F16 "llama: use 16 bit floats for some calculations" OFF)
set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
option(LLAMA_METAL "llama: use Metal" OFF)
option(LLAMA_MPI "llama: use MPI" OFF)
option(LLAMA_K_QUANTS "llama: use k-quants" ON)
option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF)

option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER "llama: build server example" ON)

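#
# Example configure invocations (a sketch, not part of the upstream file; pick the
# options above that match your hardware and toolchain):
#
#   cmake -B build                                               # default CPU build
#   cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS  # BLAS-accelerated
#   cmake -B build -DLLAMA_CUBLAS=ON                             # requires the CUDA toolkit
#   cmake -B build -DLLAMA_METAL=ON                              # Apple Metal backend
#
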
#
# Build info header
#

# Generate initial build-info.h
include(${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake)

if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git")
    set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/.git")

    # Is git submodule
    if(NOT IS_DIRECTORY "${GIT_DIR}")
        file(READ ${GIT_DIR} REAL_GIT_DIR_LINK)
        string(REGEX REPLACE "gitdir: (.*)\n$" "\\1" REAL_GIT_DIR ${REAL_GIT_DIR_LINK})
        set(GIT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/${REAL_GIT_DIR}")
    endif()

    # Add a custom target for build-info.h
    add_custom_target(BUILD_INFO ALL DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h")

    # Add a custom command to rebuild build-info.h when .git/index changes
    add_custom_command(
        OUTPUT "${CMAKE_CURRENT_SOURCE_DIR}/build-info.h"
        COMMENT "Generating build details from Git"
        COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_SOURCE_DIR}/scripts/build-info.cmake"
        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
        DEPENDS "${GIT_DIR}/index"
        VERBATIM
    )
else()
    message(WARNING "Git repository not found; to enable automatic generation of build info, make sure Git is installed and the project is a Git repository.")
endif()

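# Note (an assumption, since the generated file is not part of this listing): build-info.h
# is expected to expose the git state as preprocessor constants, roughly BUILD_NUMBER
# (commit count) and BUILD_COMMIT (short hash), which the binaries can report at startup.
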
#
# Compile flags
#

set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)

if (NOT MSVC)
    if (LLAMA_SANITIZE_THREAD)
        add_compile_options(-fsanitize=thread)
        link_libraries(-fsanitize=thread)
    endif()

    if (LLAMA_SANITIZE_ADDRESS)
        add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
        link_libraries(-fsanitize=address)
    endif()

    if (LLAMA_SANITIZE_UNDEFINED)
        add_compile_options(-fsanitize=undefined)
        link_libraries(-fsanitize=undefined)
    endif()
endif()

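# Example (a sketch): enabling the address sanitizer for a debug build; the other
# LLAMA_SANITIZE_* options above are enabled the same way:
#   cmake -B build -DCMAKE_BUILD_TYPE=Debug -DLLAMA_SANITIZE_ADDRESS=ON
#   cmake --build build
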
if (APPLE AND LLAMA_ACCELERATE)
    find_library(ACCELERATE_FRAMEWORK Accelerate)
    if (ACCELERATE_FRAMEWORK)
        message(STATUS "Accelerate framework found")

        add_compile_definitions(GGML_USE_ACCELERATE)
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK})
    else()
        message(WARNING "Accelerate framework not found")
    endif()
endif()

if (LLAMA_BLAS)
    if (LLAMA_STATIC)
        set(BLA_STATIC ON)
    endif()
    if (${CMAKE_VERSION} VERSION_GREATER_EQUAL 3.22)
        set(BLA_SIZEOF_INTEGER 8)
    endif()

    set(BLA_VENDOR ${LLAMA_BLAS_VENDOR})
    find_package(BLAS)

    if (BLAS_FOUND)
        message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}")

        if ("${BLAS_INCLUDE_DIRS}" STREQUAL "")
            # BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake.
            # see https://gitlab.kitware.com/cmake/cmake/-/issues/20268
            find_package(PkgConfig REQUIRED)
            if (${LLAMA_BLAS_VENDOR} MATCHES "Generic")
                pkg_check_modules(DepBLAS REQUIRED blas)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "OpenBLAS")
                pkg_check_modules(DepBLAS REQUIRED openblas)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "FLAME")
                pkg_check_modules(DepBLAS REQUIRED blis)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "ATLAS")
                pkg_check_modules(DepBLAS REQUIRED blas-atlas)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "FlexiBLAS")
                pkg_check_modules(DepBLAS REQUIRED flexiblas_api)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "Intel")
                # all Intel* libraries share the same include path
                pkg_check_modules(DepBLAS REQUIRED mkl-sdl)
            elseif (${LLAMA_BLAS_VENDOR} MATCHES "NVHPC")
                # NVHPC does not ship pkg-config files;
                # BLAS_INCLUDE_DIRS may need to be set manually
                if ("${NVHPC_VERSION}" STREQUAL "")
                    message(WARNING "Better to set NVHPC_VERSION")
                else()
                    set(DepBLAS_FOUND ON)
                    set(DepBLAS_INCLUDE_DIRS "/opt/nvidia/hpc_sdk/${CMAKE_SYSTEM_NAME}_${CMAKE_SYSTEM_PROCESSOR}/${NVHPC_VERSION}/math_libs/include")
                endif()
            endif()
            if (DepBLAS_FOUND)
                set(BLAS_INCLUDE_DIRS ${DepBLAS_INCLUDE_DIRS})
            else()
                message(WARNING "BLAS_INCLUDE_DIRS was neither provided nor automatically detected"
                                " by pkg-config; trying to find cblas.h in common paths...")
                find_path(BLAS_INCLUDE_DIRS
                    NAMES cblas.h
                    HINTS
                        /usr/include
                        /usr/local/include
                        /usr/include/openblas
                        /opt/homebrew/opt/openblas/include
                        /usr/local/opt/openblas/include
                        /usr/include/x86_64-linux-gnu/openblas/include
                )
            endif()
        endif()

        message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
        add_compile_options(${BLAS_LINKER_FLAGS})
        add_compile_definitions(GGML_USE_OPENBLAS)
        if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
            add_compile_definitions(GGML_BLAS_USE_MKL)
        endif()
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${BLAS_LIBRARIES})
        set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS})

    else()
        message(WARNING "BLAS not found, please refer to "
                        "https://cmake.org/cmake/help/latest/module/FindBLAS.html#blas-lapack-vendors"
                        " to set a correct LLAMA_BLAS_VENDOR")
    endif()
endif()

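# Example (a sketch): building against OpenBLAS; the second form shows manually pointing
# at the headers for the case where pkg-config cannot locate them (path is illustrative):
#   cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS
#   cmake -B build -DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS=/usr/include/openblas
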
if (LLAMA_K_QUANTS)
    set(GGML_SOURCES_EXTRA ${GGML_SOURCES_EXTRA} k_quants.c k_quants.h)
    add_compile_definitions(GGML_USE_K_QUANTS)
    if (LLAMA_QKK_64)
        add_compile_definitions(GGML_QKK_64)
    endif()
endif()

if (LLAMA_CUBLAS)
    cmake_minimum_required(VERSION 3.17)

    find_package(CUDAToolkit)
    if (CUDAToolkit_FOUND)
        message(STATUS "cuBLAS found")

        enable_language(CUDA)

        set(GGML_SOURCES_CUDA ggml-cuda.cu ggml-cuda.h)

        add_compile_definitions(GGML_USE_CUBLAS)
#       if (LLAMA_CUDA_CUBLAS)
#           add_compile_definitions(GGML_CUDA_CUBLAS)
#       endif()
        if (LLAMA_CUDA_FORCE_DMMV)
            add_compile_definitions(GGML_CUDA_FORCE_DMMV)
        endif()
        add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
        add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
        if (DEFINED LLAMA_CUDA_DMMV_Y)
            add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_DMMV_Y}) # for backwards compatibility
        endif()
        if (LLAMA_CUDA_F16 OR LLAMA_CUDA_DMMV_F16)
            add_compile_definitions(GGML_CUDA_F16)
        endif()
        add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})

        if (LLAMA_STATIC)
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
        else()
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
        endif()

        if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
            # 52 == lowest CUDA 12 standard
            # 60 == f16 CUDA intrinsics
            # 61 == integer CUDA intrinsics
            # 70 == compute capability at which unrolling a loop in mul_mat_q kernels is faster
            if (LLAMA_CUDA_F16 OR LLAMA_CUDA_DMMV_F16)
                set(CMAKE_CUDA_ARCHITECTURES "60;61;70") # needed for f16 CUDA intrinsics
            else()
                set(CMAKE_CUDA_ARCHITECTURES "52;61;70") # lowest CUDA 12 standard + lowest for integer intrinsics
            endif()
        endif()
        message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")

    else()
        message(WARNING "cuBLAS not found")
    endif()
endif()

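# Example (a sketch): a CUDA build pinned to a single GPU generation to reduce compile
# time; "75" (Turing) is an illustrative value, not a project default:
#   cmake -B build -DLLAMA_CUBLAS=ON -DCMAKE_CUDA_ARCHITECTURES=75
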
if (LLAMA_METAL)
    find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
    find_library(METAL_FRAMEWORK    Metal      REQUIRED)
    find_library(METALKIT_FRAMEWORK MetalKit   REQUIRED)

    set(GGML_SOURCES_METAL ggml-metal.m ggml-metal.h)

    add_compile_definitions(GGML_USE_METAL)
    add_compile_definitions(GGML_METAL_NDEBUG)

    # get full path to the file
    #add_compile_definitions(GGML_METAL_DIR_KERNELS="${CMAKE_CURRENT_SOURCE_DIR}/")

    # copy ggml-metal.metal to bin directory
    configure_file(ggml-metal.metal bin/ggml-metal.metal COPYONLY)

    set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS}
        ${FOUNDATION_LIBRARY}
        ${METAL_FRAMEWORK}
        ${METALKIT_FRAMEWORK}
    )
endif()

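# Example (a sketch): a Metal build on Apple Silicon; the binaries load ggml-metal.metal
# at runtime, which is why the file is copied next to them above:
#   cmake -B build -DLLAMA_METAL=ON
#   cmake --build build --config Release
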
if (LLAMA_MPI)
    cmake_minimum_required(VERSION 3.10)
    find_package(MPI)
    if (MPI_C_FOUND)
        message(STATUS "MPI found")
        set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h)
        add_compile_definitions(GGML_USE_MPI)
        add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
        set(cxx_flags ${cxx_flags} -Wno-cast-qual)
        set(c_flags   ${c_flags}   -Wno-cast-qual)
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_C_LIBRARIES})
        set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${MPI_C_INCLUDE_DIRS})
        # Even if you're only using the C header, C++ programs may bring in MPI
        # C++ functions, so more linkage is needed
        if (MPI_CXX_FOUND)
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${MPI_CXX_LIBRARIES})
        endif()
    else()
        message(WARNING "MPI not found")
    endif()
endif()

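# Example (a sketch; the compiler wrappers are an assumption about the local MPI
# installation and are not strictly required by find_package(MPI)):
#   CC=mpicc CXX=mpicxx cmake -B build -DLLAMA_MPI=ON
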
if (LLAMA_CLBLAST)
    find_package(CLBlast)
    if (CLBlast_FOUND)
        message(STATUS "CLBlast found")

        set(GGML_SOURCES_OPENCL ggml-opencl.cpp ggml-opencl.h)

        add_compile_definitions(GGML_USE_CLBLAST)

        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} clblast)
    else()
        message(WARNING "CLBlast not found")
    endif()
endif()

if (LLAMA_ALL_WARNINGS)
    if (NOT MSVC)
        set(c_flags
            -Wall
            -Wextra
            -Wpedantic
            -Wcast-qual
            -Wdouble-promotion
            -Wshadow
            -Wstrict-prototypes
            -Wpointer-arith
            -Wmissing-prototypes
        )
        set(cxx_flags
            -Wall
            -Wextra
            -Wpedantic
            -Wcast-qual
            -Wno-unused-function
            -Wno-multichar
        )
    else()
        # todo : msvc
    endif()

    add_compile_options(
        "$<$<COMPILE_LANGUAGE:C>:${c_flags}>"
        "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
    )

endif()

if (MSVC)
    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)

    if (BUILD_SHARED_LIBS)
        set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
    endif()
endif()

if (LLAMA_LTO)
    include(CheckIPOSupported)
    check_ipo_supported(RESULT result OUTPUT output)
    if (result)
        set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
    else()
        message(WARNING "IPO is not supported: ${output}")
    endif()
endif()

# Architecture specific
# TODO: probably these flags need to be tweaked on some architectures
#       feel free to update the Makefile for your architecture and send a pull request or issue
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
if (NOT MSVC)
    if (LLAMA_STATIC)
        add_link_options(-static)
        if (MINGW)
            add_link_options(-static-libgcc -static-libstdc++)
        endif()
    endif()
    if (LLAMA_GPROF)
        add_compile_options(-pg)
    endif()
    if (LLAMA_NATIVE)
        add_compile_options(-march=native)
    endif()
endif()

if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
    message(STATUS "ARM detected")
    if (MSVC)
        # TODO: arm msvc?
    else()
        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv6")
            # Raspberry Pi 1, Zero
            add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access)
        endif()
        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv7")
            # Raspberry Pi 2
            add_compile_options(-mfpu=neon-fp-armv8 -mfp16-format=ieee -mno-unaligned-access -funsafe-math-optimizations)
        endif()
        if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "armv8")
            # Raspberry Pi 3, 4, Zero 2 (32-bit)
            add_compile_options(-mfp16-format=ieee -mno-unaligned-access)
        endif()
    endif()
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$")
    message(STATUS "x86 detected")
    if (MSVC)
        if (LLAMA_AVX512)
            add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX512>)
            add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)
            # MSVC has no compile-time flags enabling specific
            # AVX512 extensions, nor does it define the
            # corresponding macros, so define them manually.
            if (LLAMA_AVX512_VBMI)
                add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
                add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
            endif()
            if (LLAMA_AVX512_VNNI)
                add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
                add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
            endif()
        elseif (LLAMA_AVX2)
            add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX2>)
            add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>)
        elseif (LLAMA_AVX)
            add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX>)
            add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
        endif()
    else()
        if (LLAMA_F16C)
            add_compile_options(-mf16c)
        endif()
        if (LLAMA_FMA)
            add_compile_options(-mfma)
        endif()
        if (LLAMA_AVX)
            add_compile_options(-mavx)
        endif()
        if (LLAMA_AVX2)
            add_compile_options(-mavx2)
        endif()
        if (LLAMA_AVX512)
            add_compile_options(-mavx512f)
            add_compile_options(-mavx512bw)
        endif()
        if (LLAMA_AVX512_VBMI)
            add_compile_options(-mavx512vbmi)
        endif()
        if (LLAMA_AVX512_VNNI)
            add_compile_options(-mavx512vnni)
        endif()
    endif()
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "ppc64")
    message(STATUS "PowerPC detected")
    add_compile_options(-mcpu=native -mtune=native)
    # TODO: add targets for Power8/Power9 (Altivec/VSX) and Power10 (MMA) and query for big endian systems (ppc64/le/be)
else()
    message(STATUS "Unknown architecture")
endif()

#
# libraries
#

# ggml

add_library(ggml OBJECT
    ggml.c
    ggml.h
    ggml-alloc.c
    ggml-alloc.h
    ${GGML_SOURCES_CUDA}
    ${GGML_SOURCES_OPENCL}
    ${GGML_SOURCES_METAL}
    ${GGML_SOURCES_MPI}
    ${GGML_SOURCES_EXTRA}
)

target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
target_compile_features(ggml PUBLIC c_std_11) # don't bump
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})

add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
if (BUILD_SHARED_LIBS)
    set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
    add_library(ggml_shared SHARED $<TARGET_OBJECTS:ggml>)
    target_link_libraries(ggml_shared PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
    install(TARGETS ggml_shared LIBRARY)
endif()

# llama

add_library(llama
    llama.cpp
    llama.h
)

target_include_directories(llama PUBLIC .)
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
target_link_libraries(llama PRIVATE
    ggml
    ${LLAMA_EXTRA_LIBS}
)

if (BUILD_SHARED_LIBS)
    set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
    target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
    if (LLAMA_METAL)
        set_target_properties(llama PROPERTIES RESOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
    endif()
    install(TARGETS llama LIBRARY)
endif()

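# A minimal sketch (not part of the upstream build) of how a parent project that vendors
# this repository might consume the targets defined above; "my_app" and the directory
# name are placeholders:
#
#   add_subdirectory(llama.cpp)
#   add_executable(my_app main.cpp)
#   target_link_libraries(my_app PRIVATE llama)
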
#
# install
#

include(GNUInstallDirs)
install(
    FILES convert.py
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        OWNER_EXECUTE
        GROUP_READ
        GROUP_EXECUTE
        WORLD_READ
        WORLD_EXECUTE
    DESTINATION ${CMAKE_INSTALL_BINDIR})
install(
    FILES convert-lora-to-ggml.py
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        OWNER_EXECUTE
        GROUP_READ
        GROUP_EXECUTE
        WORLD_READ
        WORLD_EXECUTE
    DESTINATION ${CMAKE_INSTALL_BINDIR})
if (LLAMA_METAL)
    install(
        FILES ggml-metal.metal
        PERMISSIONS
            OWNER_READ
            OWNER_WRITE
            GROUP_READ
            WORLD_READ
        DESTINATION ${CMAKE_INSTALL_BINDIR})
endif()

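# Example (a sketch; `cmake --install` needs CMake >= 3.15): installing the libraries and
# helper scripts into a custom prefix after building:
#   cmake --install build --prefix /opt/llama.cpp
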
#
# programs, examples and tests
#

add_subdirectory(common)

if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
    include(CTest)
    add_subdirectory(tests)
endif ()

if (LLAMA_BUILD_EXAMPLES)
    add_subdirectory(examples)
    add_subdirectory(pocs)
endif()
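
# Example (a sketch): building everything and running the tests registered via CTest:
#   cmake -B build -DLLAMA_BUILD_TESTS=ON
#   cmake --build build --config Release
#   (cd build && ctest --output-on-failure)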