ggml : move sgemm sources to llamafile subfolder (#8394)

ggml-ci
This commit is contained in:
Georgi Gerganov 2024-07-10 15:23:29 +03:00 committed by GitHub
parent 0f1a39f343
commit 6b2a849d1f
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 9 additions and 10 deletions

View File

@@ -554,7 +554,7 @@ endif # GGML_BLIS
ifndef GGML_NO_LLAMAFILE ifndef GGML_NO_LLAMAFILE
MK_CPPFLAGS += -DGGML_USE_LLAMAFILE MK_CPPFLAGS += -DGGML_USE_LLAMAFILE
OBJ_GGML += ggml/src/sgemm.o OBJ_GGML += ggml/src/llamafile/sgemm.o
endif endif
ifdef GGML_RPC ifdef GGML_RPC
@@ -983,9 +983,9 @@ ggml/src/ggml-blas.o: \
$(CXX) $(CXXFLAGS) -c $< -o $@ $(CXX) $(CXXFLAGS) -c $< -o $@
ifndef GGML_NO_LLAMAFILE ifndef GGML_NO_LLAMAFILE
ggml/src/sgemm.o: \ ggml/src/llamafile/sgemm.o: \
ggml/src/sgemm.cpp \ ggml/src/llamafile/sgemm.cpp \
ggml/src/sgemm.h \ ggml/src/llamafile/sgemm.h \
ggml/include/ggml.h ggml/include/ggml.h
$(CXX) $(CXXFLAGS) -c $< -o $@ $(CXX) $(CXXFLAGS) -c $< -o $@
endif # GGML_NO_LLAMAFILE endif # GGML_NO_LLAMAFILE

View File

@@ -104,7 +104,7 @@ option(GGML_ACCELERATE "ggml: enable Accelerate framework"
option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT}) option(GGML_BLAS "ggml: use BLAS" ${GGML_BLAS_DEFAULT})
set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING set(GGML_BLAS_VENDOR ${GGML_BLAS_VENDOR_DEFAULT} CACHE STRING
"ggml: BLAS library vendor") "ggml: BLAS library vendor")
option(GGML_LLAMAFILE "ggml: use ggml SGEMM" OFF) option(GGML_LLAMAFILE "ggml: use LLAMAFILE" OFF)
option(GGML_CUDA "ggml: use CUDA" OFF) option(GGML_CUDA "ggml: use CUDA" OFF)
option(GGML_CUDA_FORCE_DMMV "ggml: use dmmv instead of mmvq CUDA kernels" OFF) option(GGML_CUDA_FORCE_DMMV "ggml: use dmmv instead of mmvq CUDA kernels" OFF)

View File

@@ -238,12 +238,12 @@ if (GGML_BLAS)
endif() endif()
if (GGML_LLAMAFILE) if (GGML_LLAMAFILE)
message(STATUS "Using ggml SGEMM") message(STATUS "Using llamafile")
add_compile_definitions(GGML_USE_LLAMAFILE) add_compile_definitions(GGML_USE_LLAMAFILE)
set(GGML_HEADERS_LLAMAFILE sgemm.h) set(GGML_HEADERS_LLAMAFILE llamafile/sgemm.h)
set(GGML_SOURCES_LLAMAFILE sgemm.cpp) set(GGML_SOURCES_LLAMAFILE llamafile/sgemm.cpp)
endif() endif()
if (GGML_CUDA) if (GGML_CUDA)

View File

@@ -6,7 +6,6 @@
#include "ggml.h" #include "ggml.h"
#include "ggml-aarch64.h" #include "ggml-aarch64.h"
#if defined(_MSC_VER) || defined(__MINGW32__) #if defined(_MSC_VER) || defined(__MINGW32__)
#include <malloc.h> // using malloc.h with MSC/MINGW #include <malloc.h> // using malloc.h with MSC/MINGW
#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__) #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
@@ -43,7 +42,7 @@
#endif #endif
#ifdef GGML_USE_LLAMAFILE #ifdef GGML_USE_LLAMAFILE
#include "sgemm.h" #include <llamafile/sgemm.h>
#endif #endif
#if defined(_MSC_VER) #if defined(_MSC_VER)