Mirror of https://github.com/ggerganov/llama.cpp.git
cmake : restore LLAMA_LLAMAFILE_DEFAULT
parent fa0b4ad252
commit dba497e0c1
CMakeLists.txt
@@ -43,6 +43,8 @@ else()
     set(LLAMA_METAL_DEFAULT OFF)
 endif()
 
+set(LLAMA_LLAMAFILE_DEFAULT ON)
+
 # general
 option(BUILD_SHARED_LIBS "build shared libraries" OFF)
 option(LLAMA_STATIC "llama: static link libraries" OFF)
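Note: this hunk only restores the default value. LLAMA_LLAMAFILE_DEFAULT presumably still feeds the LLAMA_LLAMAFILE option and, when enabled, ends up as the GGML_USE_LLAMAFILE compile definition that the llama.cpp hunk below checks. A minimal C++ sketch of consuming that definition, under that assumption (the helper name llamafile_enabled_at_build is hypothetical and not part of this change):

// Minimal sketch, assuming LLAMA_LLAMAFILE_DEFAULT=ON ultimately causes the
// GGML_USE_LLAMAFILE compile definition to be passed to the compiler.
#include <cstdio>

// Hypothetical helper, for illustration only.
static bool llamafile_enabled_at_build() {
#ifdef GGML_USE_LLAMAFILE
    return true;   // built with the llamafile SGEMM kernels
#else
    return false;  // kernels compiled out
#endif
}

int main() {
    std::printf("llamafile kernels built in: %s\n",
                llamafile_enabled_at_build() ? "yes" : "no");
    return 0;
}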
llama.cpp
@@ -17653,6 +17653,11 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
     s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
+#ifdef GGML_USE_LLAMAFILE
+    s += "LAMMAFILE = 1 | ";
+#else
+    s += "LAMMAFILE = 0 | ";
+#endif
 
     return s.c_str();
 }
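For context, llama_print_system_info() returns a pointer to a string owned by the library, so a caller simply prints it; with this change the string additionally reports whether the llamafile path was compiled in. A minimal usage sketch (assumes the program links against the llama.cpp library and that llama.h is on the include path):

// Minimal sketch: print the system-info string, which now includes the
// llamafile entry added in the hunk above.
#include <cstdio>
#include "llama.h"

int main() {
    // The returned pointer refers to library-owned storage; print it directly.
    std::printf("%s\n", llama_print_system_info());
    return 0;
}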