Compare commits


8 Commits

SHA1        Author           Date

8deb1566d9  Djip007          2024-12-24 10:44:23 +03:00
    Merge 94cd488f6a into 30caac3a68

30caac3a68  Georgi Gerganov  2024-12-24 09:44:20 +02:00
    llama : the WPM vocabs use the CLS token as BOS (#10930)
    * llama : the WPM vocabs use the CLS token as BOS (ggml-ci)
    * llama : add comment

60cfa728e2  Diego Devesa     2024-12-24 04:05:27 +01:00
    ggml : use wstring for backend search paths (#10960) (ggml-ci)

3327bb0f8d  Diego Devesa     2024-12-24 04:05:17 +01:00
    ggml : fix arm enabled features check (#10961)

94cd488f6a  Djip007          2024-12-23 00:37:10 +01:00
    git 2.47 uses short ids of length 9

ac2b53c564  Djip007          2024-12-23 00:33:19 +01:00
    sgemm: add M blocks

d732874114  Djip007          2024-12-23 00:24:14 +01:00
    tinyblas: dynamic dispatching

3f2bc659e7  Djip007          2024-12-23 00:23:22 +01:00
    more performance with llamafile tinyblas on x86_64:
    - add bf16 support
    - change dispatch strategy (thanks: https://github.com/ikawrakow/ik_llama.cpp/pull/71)
    - reduce memory bandwidth
    simple tinyblas dispatch and more cache friendly
9 changed files with 367 additions and 326 deletions

File: ggml/src/CMakeLists.txt

@@ -234,6 +234,7 @@ function(ggml_add_backend_library backend)
         # write the shared library to the output directory
         set_target_properties(${backend} PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
         target_compile_definitions(${backend} PRIVATE GGML_BACKEND_DL)
+        add_dependencies(ggml ${backend})
     else()
         add_library(${backend} ${ARGN})
         target_link_libraries(ggml PUBLIC ${backend})

File: ggml/src/ggml-backend-reg.cpp

@@ -66,6 +66,26 @@
 #include "ggml-kompute.h"
 #endif

+// disable C++17 deprecation warning for std::codecvt_utf8
+#if defined(__clang__)
+#    pragma clang diagnostic push
+#    pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+static std::wstring utf8_to_utf16(const std::string & str) {
+    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
+    return converter.from_bytes(str);
+}
+
+static std::string utf16_to_utf8(const std::wstring & str) {
+    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
+    return converter.to_bytes(str);
+}
+
+#if defined(__clang__)
+#    pragma clang diagnostic pop
+#endif
+
 #ifdef _WIN32

 using dl_handle = std::remove_pointer_t<HMODULE>;
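The two new helpers are the only place where the UTF-8 to UTF-16 conversion happens; everything else in the registry now passes std::wstring around. A minimal standalone sketch of the same std::wstring_convert round trip (the filename literal is only an example; std::codecvt_utf8_utf16 is deprecated since C++17, which is why the patch wraps it in the clang pragma):

    #include <codecvt>
    #include <iostream>
    #include <locale>
    #include <string>

    int main() {
        std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
        std::wstring wide = converter.from_bytes("ggml-cpu-haswell.dll"); // UTF-8 -> UTF-16
        std::string  back = converter.to_bytes(wide);                     // UTF-16 -> UTF-8
        std::cout << back << " (" << wide.size() << " wide chars)\n";
        return 0;
    }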
@@ -88,11 +108,6 @@ static dl_handle * dl_load_library(const std::wstring & path) {
     return handle;
 }

-static dl_handle * dl_load_library(const std::string & path) {
-    std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>> converter;
-    return dl_load_library(converter.from_bytes(path));
-}
-
 static void * dl_get_sym(dl_handle * handle, const char * name) {
     DWORD old_mode = SetErrorMode(SEM_FAILCRITICALERRORS);
     SetErrorMode(old_mode | SEM_FAILCRITICALERRORS);
@@ -114,8 +129,8 @@ struct dl_handle_deleter {
     }
 };

-static void * dl_load_library(const std::string & path) {
-    dl_handle * handle = dlopen(path.c_str(), RTLD_NOW | RTLD_LOCAL);
+static void * dl_load_library(const std::wstring & path) {
+    dl_handle * handle = dlopen(utf16_to_utf8(path).c_str(), RTLD_NOW | RTLD_LOCAL);

     return handle;
 }
@@ -202,11 +217,11 @@ struct ggml_backend_registry {
         devices.push_back(device);
     }

-    ggml_backend_reg_t load_backend(const char * path, bool silent) {
+    ggml_backend_reg_t load_backend(const std::wstring & path, bool silent) {
         dl_handle_ptr handle { dl_load_library(path) };
         if (!handle) {
             if (!silent) {
-                GGML_LOG_ERROR("%s: failed to load %s\n", __func__, path);
+                GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(path).c_str());
             }
             return nullptr;
         }
@@ -214,7 +229,7 @@ struct ggml_backend_registry {
         auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score");
         if (score_fn && score_fn() == 0) {
             if (!silent) {
-                GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, path);
+                GGML_LOG_INFO("%s: backend %s is not supported on this system\n", __func__, utf16_to_utf8(path).c_str());
             }
             return nullptr;
         }
@@ -222,7 +237,7 @@ struct ggml_backend_registry {
         auto backend_init_fn = (ggml_backend_init_t) dl_get_sym(handle.get(), "ggml_backend_init");
         if (!backend_init_fn) {
             if (!silent) {
-                GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, path);
+                GGML_LOG_ERROR("%s: failed to find ggml_backend_init in %s\n", __func__, utf16_to_utf8(path).c_str());
             }
             return nullptr;
         }
@@ -231,16 +246,16 @@ struct ggml_backend_registry {
         if (!reg || reg->api_version != GGML_BACKEND_API_VERSION) {
             if (!silent) {
                 if (!reg) {
-                    GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, path);
+                    GGML_LOG_ERROR("%s: failed to initialize backend from %s: ggml_backend_init returned NULL\n", __func__, utf16_to_utf8(path).c_str());
                 } else {
                     GGML_LOG_ERROR("%s: failed to initialize backend from %s: incompatible API version (backend: %d, current: %d)\n",
-                        __func__, path, reg->api_version, GGML_BACKEND_API_VERSION);
+                        __func__, utf16_to_utf8(path).c_str(), reg->api_version, GGML_BACKEND_API_VERSION);
                 }
             }
             return nullptr;
         }

-        GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), path);
+        GGML_LOG_INFO("%s: loaded %s backend from %s\n", __func__, ggml_backend_reg_name(reg), utf16_to_utf8(path).c_str());

         register_backend(reg, std::move(handle));
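load_backend treats every candidate library as a plugin that must export ggml_backend_score and ggml_backend_init and advertise a matching GGML_BACKEND_API_VERSION. The probing pattern, reduced to plain POSIX dlopen/dlsym (an illustrative sketch, not the registry code itself; the library name is hypothetical):

    #include <dlfcn.h>
    #include <cstdio>

    // Open a candidate backend, ask it to score itself, and reject it when the
    // symbol is missing or the score is 0 ("not supported on this system").
    static int probe_backend(const char * path) {
        void * handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
        if (!handle) {
            std::fprintf(stderr, "failed to load %s\n", path);
            return 0;
        }
        using score_fn_t = int (*)(void);
        auto score_fn = (score_fn_t) dlsym(handle, "ggml_backend_score");
        int score = score_fn ? score_fn() : 0;
        dlclose(handle);
        return score;
    }

    int main() {
        std::printf("score: %d\n", probe_backend("./libggml-cpu-haswell.so"));
        return 0;
    }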
@@ -376,14 +391,14 @@ ggml_backend_t ggml_backend_init_best(void) {
 // Dynamic loading

 ggml_backend_reg_t ggml_backend_load(const char * path) {
-    return get_reg().load_backend(path, false);
+    return get_reg().load_backend(utf8_to_utf16(path), false);
 }

 void ggml_backend_unload(ggml_backend_reg_t reg) {
     get_reg().unload_backend(reg, true);
 }

-static std::string get_executable_path() {
+static std::wstring get_executable_path() {
 #if defined(__APPLE__)
     // get executable path
     std::vector<char> path;
@@ -401,7 +416,7 @@ static std::string get_executable_path() {
     if (last_slash != std::string::npos) {
         base_path = base_path.substr(0, last_slash);
     }
-    return base_path + "/";
+    return utf8_to_utf16(base_path + "/");
 #elif defined(__linux__) || defined(__FreeBSD__)
     std::string base_path = ".";
     std::vector<char> path(1024);
@@ -427,57 +442,63 @@ static std::string get_executable_path() {
         path.resize(path.size() * 2);
     }

-    return base_path + "/";
+    return utf8_to_utf16(base_path + "/");
 #elif defined(_WIN32)
-    std::vector<char> path(MAX_PATH);
-    DWORD len = GetModuleFileNameA(NULL, path.data(), path.size());
+    std::vector<wchar_t> path(MAX_PATH);
+    DWORD len = GetModuleFileNameW(NULL, path.data(), path.size());
     if (len == 0) {
-        return "";
+        return {};
     }
-    std::string base_path(path.data(), len);
+    std::wstring base_path(path.data(), len);
     // remove executable name
     auto last_slash = base_path.find_last_of('\\');
     if (last_slash != std::string::npos) {
         base_path = base_path.substr(0, last_slash);
     }
-    return base_path + "\\";
+    return base_path + L"\\";
+#else
+    return {};
 #endif
 }

-static std::string backend_filename_prefix() {
+static std::wstring backend_filename_prefix() {
 #ifdef _WIN32
-    return "ggml-";
+    return L"ggml-";
 #else
-    return "libggml-";
+    return L"libggml-";
 #endif
 }

-static std::string backend_filename_suffix() {
+static std::wstring backend_filename_suffix() {
 #ifdef _WIN32
-    return ".dll";
+    return L".dll";
 #else
-    return ".so";
+    return L".so";
+#endif
+}
+
+static std::wstring path_separator() {
+#ifdef _WIN32
+    return L"\\";
+#else
+    return L"/";
 #endif
 }

 static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent, const char * user_search_path) {
     // enumerate all the files that match [lib]ggml-name-*.[so|dll] in the search paths
     // TODO: search system paths
-    std::string file_prefix = backend_filename_prefix() + name + "-";
-    std::vector<std::string> search_paths;
+    std::wstring file_prefix = backend_filename_prefix() + utf8_to_utf16(name) + L"-";
+    std::vector<std::wstring> search_paths;
     if (user_search_path == nullptr) {
-        search_paths.push_back("./");
+        search_paths.push_back(L"." + path_separator());
         search_paths.push_back(get_executable_path());
     } else {
-#if defined(_WIN32)
-        search_paths.push_back(std::string(user_search_path) + "\\");
-#else
-        search_paths.push_back(std::string(user_search_path) + "/");
-#endif
+        search_paths.push_back(utf8_to_utf16(user_search_path) + path_separator());
     }

     int best_score = 0;
-    std::string best_path;
+    std::wstring best_path;

     namespace fs = std::filesystem;
     for (const auto & search_path : search_paths) {
@@ -487,27 +508,27 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent,
         fs::directory_iterator dir_it(search_path, fs::directory_options::skip_permission_denied);
         for (const auto & entry : dir_it) {
             if (entry.is_regular_file()) {
-                std::string filename = entry.path().filename().string();
-                std::string ext = entry.path().extension().string();
+                std::wstring filename = entry.path().filename().wstring();
+                std::wstring ext = entry.path().extension().wstring();
                 if (filename.find(file_prefix) == 0 && ext == backend_filename_suffix()) {
-                    dl_handle_ptr handle { dl_load_library(entry.path().c_str()) };
+                    dl_handle_ptr handle { dl_load_library(entry.path().wstring()) };
                     if (!handle && !silent) {
-                        GGML_LOG_ERROR("%s: failed to load %s\n", __func__, entry.path().string().c_str());
+                        GGML_LOG_ERROR("%s: failed to load %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
                     }
                     if (handle) {
                         auto score_fn = (ggml_backend_score_t) dl_get_sym(handle.get(), "ggml_backend_score");
                         if (score_fn) {
                             int s = score_fn();
 #ifndef NDEBUG
-                            GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, entry.path().string().c_str(), s);
+                            GGML_LOG_DEBUG("%s: %s score: %d\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str(), s);
 #endif
                             if (s > best_score) {
                                 best_score = s;
-                                best_path = entry.path().string();
+                                best_path = entry.path().wstring();
                             }
                         } else {
                             if (!silent) {
-                                GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, entry.path().string().c_str());
+                                GGML_LOG_INFO("%s: failed to find ggml_backend_score in %s\n", __func__, utf16_to_utf8(entry.path().wstring()).c_str());
                             }
                         }
                     }
@@ -519,15 +540,15 @@ static ggml_backend_reg_t ggml_backend_load_best(const char * name, bool silent,
     if (best_score == 0) {
         // try to load the base backend
         for (const auto & search_path : search_paths) {
-            std::string path = search_path + backend_filename_prefix() + name + backend_filename_suffix();
+            std::wstring path = search_path + backend_filename_prefix() + utf8_to_utf16(name) + backend_filename_suffix();
             if (fs::exists(path)) {
-                return get_reg().load_backend(path.c_str(), silent);
+                return get_reg().load_backend(path, silent);
             }
         }
         return nullptr;
     }

-    return get_reg().load_backend(best_path.c_str(), silent);
+    return get_reg().load_backend(best_path, silent);
 }

 void ggml_backend_load_all() {
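The public entry points keep taking UTF-8 char * paths; the wide-string handling stays internal. A hedged usage sketch, assuming only the public registry API from ggml-backend.h (ggml_backend_load_all, ggml_backend_reg_count, ggml_backend_reg_get, ggml_backend_reg_name):

    #include "ggml-backend.h"
    #include <cstdio>

    int main() {
        // scans for [lib]ggml-<name>-<variant>.(so|dll) next to the executable
        ggml_backend_load_all();

        for (size_t i = 0; i < ggml_backend_reg_count(); ++i) {
            ggml_backend_reg_t reg = ggml_backend_reg_get(i);
            std::printf("registered backend: %s\n", ggml_backend_reg_name(reg));
        }
        return 0;
    }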

File: ggml/src/ggml-cpu/CMakeLists.txt

@@ -135,14 +135,20 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
             endif()

             # show enabled features
+            if (CMAKE_HOST_SYSTEM_NAME STREQUAL "Windows")
+                set(FEAT_INPUT_FILE "NUL")
+            else()
+                set(FEAT_INPUT_FILE "/dev/null")
+            endif()
+
             execute_process(
                 COMMAND ${CMAKE_C_COMPILER} ${ARCH_FLAGS} -dM -E -
-                INPUT_FILE "/dev/null"
+                INPUT_FILE ${FEAT_INPUT_FILE}
                 OUTPUT_VARIABLE ARM_FEATURE
                 RESULT_VARIABLE ARM_FEATURE_RESULT
             )
             if (ARM_FEATURE_RESULT)
-                message(FATAL_ERROR "Failed to get ARM features")
+                message(WARNING "Failed to get ARM features")
             else()
                 foreach(feature DOTPROD SVE MATMUL_INT8 FMA FP16_VECTOR_ARITHMETIC)
                     string(FIND "${ARM_FEATURE}" "__ARM_FEATURE_${feature} 1" feature_pos)
@@ -317,6 +323,11 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
     target_compile_definitions(${GGML_CPU_NAME} PRIVATE ${ARCH_DEFINITIONS})

     if (GGML_BACKEND_DL)
+        if (GGML_NATIVE)
+            # the feature check relies on ARCH_DEFINITIONS, but it is not set with GGML_NATIVE
+            message(FATAL_ERROR "GGML_NATIVE is not compatible with GGML_BACKEND_DL, consider using GGML_CPU_ALL_VARIANTS")
+        endif()
+
         # The feature detection code is compiled as a separate target so that
         # it can be built without the architecture flags
         # Since multiple variants of the CPU backend may be included in the same
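The check above asks the compiler for its predefined macros (-dM -E on an empty input) and greps for __ARM_FEATURE_<name> 1. The same list can be dumped by a tiny program built with the candidate ARCH_FLAGS; this is only an illustration of what the CMake logic looks for:

    #include <cstdio>

    // Report which of the ARM feature macros probed by the CMake check are
    // defined for the current compiler/flag combination.
    int main() {
    #ifdef __ARM_FEATURE_DOTPROD
        std::puts("__ARM_FEATURE_DOTPROD");
    #endif
    #ifdef __ARM_FEATURE_SVE
        std::puts("__ARM_FEATURE_SVE");
    #endif
    #ifdef __ARM_FEATURE_MATMUL_INT8
        std::puts("__ARM_FEATURE_MATMUL_INT8");
    #endif
    #ifdef __ARM_FEATURE_FMA
        std::puts("__ARM_FEATURE_FMA");
    #endif
    #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        std::puts("__ARM_FEATURE_FP16_VECTOR_ARITHMETIC");
    #endif
        return 0;
    }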

File: ggml/src/ggml-cpu/ggml-cpu.c

@@ -7419,14 +7419,14 @@ static void ggml_compute_forward_mul_mat(
     if (src1_cont) {
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
-                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
+                if (!llamafile_sgemm(params,
+                                     ne01, ne11, ne00/ggml_blck_size(src0->type),
                                      (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
                                      nb01/ggml_type_size(src0->type),
                                      (const char *)src1->data + i12*nb12 + i13*nb13,
                                      nb11/ggml_type_size(src1->type),
                                      (char *)dst->data + i12*nb2 + i13*nb3,
                                      nb1/ggml_type_size(dst->type),
-                                     ith, nth,
                                      src0->type,
                                      src1->type,
                                      dst->type))
@@ -7471,14 +7471,14 @@ UseGgmlGemm1:;
     for (int64_t i13 = 0; i13 < ne13; i13++)
         for (int64_t i12 = 0; i12 < ne12; i12++)
-            if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
+            if (!llamafile_sgemm(params,
+                                 ne01, ne11, ne00/ggml_blck_size(src0->type),
                                  (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
                                  nb01/ggml_type_size(src0->type),
                                  (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
                                  row_size/ggml_type_size(vec_dot_type),
                                  (char *)dst->data + i12*nb2 + i13*nb3,
                                  nb1/ggml_type_size(dst->type),
-                                 ith, nth,
                                  src0->type,
                                  vec_dot_type,
                                  dst->type))
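Only the call sites change here: the thread indices now travel inside params, and the byte strides are still converted to element strides before the call. A tiny worked example of that stride arithmetic (the sizes are made up):

    #include <cassert>
    #include <cstdint>

    int main() {
        const int64_t ne00      = 4096;             // columns of src0 (hypothetical)
        const int64_t type_size = 4;                // sizeof(float) for GGML_TYPE_F32
        const int64_t nb01      = ne00 * type_size; // byte stride between rows
        const int64_t lda       = nb01 / type_size; // element stride handed to llamafile_sgemm
        assert(lda == ne00);
        return 0;
    }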

File: ggml/src/ggml-cpu/llamafile/sgemm.cpp

@@ -53,6 +53,8 @@
 #include "ggml-cpu-impl.h"
 #include "ggml-quants.h"

+#include <atomic>
+
 #ifdef _MSC_VER
 #define NOINLINE __declspec(noinline)
 #else
@@ -134,6 +136,16 @@ inline __m512 madd(__m512 a, __m512 b, __m512 c) {
     return _mm512_fmadd_ps(a, b, c);
 }
 #endif
+#if defined(__AVX512BF16__)
+template <>
+inline __m512 madd(__m512bh a, __m512bh b, __m512 c) {
+    return _mm512_dpbf16_ps(c, a, b);
+}
+template <>
+inline __m256 madd(__m256bh a, __m256bh b, __m256 c) {
+    return _mm256_dpbf16_ps(c, a, b);
+}
+#endif
 #endif

 #if defined(__ARM_FEATURE_FMA)
@@ -226,6 +238,13 @@ template <> inline __m256 load(const float *p) {
 }
 #endif // __AVX__

+#if defined(__AVX2__) || defined(__AVX512F__)
+template <> inline __m256 load(const ggml_bf16_t *p) {
+    return _mm256_castsi256_ps(
+        _mm256_slli_epi32(_mm256_cvtepu16_epi32(_mm_loadu_si128((const __m128i *)p)), 16));
+}
+#endif // __AVX2__
+
 #if defined(__F16C__)
 template <> inline __m256 load(const ggml_fp16_t *p) {
     return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)p));
@@ -239,8 +258,27 @@ template <> inline __m512 load(const float *p) {
 template <> inline __m512 load(const ggml_fp16_t *p) {
     return _mm512_cvtph_ps(_mm256_loadu_si256((const __m256i *)p));
 }
+template <> inline __m512 load(const ggml_bf16_t *p) {
+    return _mm512_castsi512_ps(
+        _mm512_slli_epi32(_mm512_cvtepu16_epi32(_mm256_loadu_si256((const __m256i *)p)), 16));
+}
 #endif // __AVX512F__

+#if defined(__AVX512BF16__)
+template <> inline __m512bh load(const ggml_bf16_t *p) {
+    return (__m512bh)_mm512_loadu_ps((const float *)p);
+}
+template <> inline __m256bh load(const ggml_bf16_t *p) {
+    return (__m256bh)_mm256_loadu_ps((const float *)p);
+}
+template <> inline __m512bh load(const float *p) {
+    return _mm512_cvtne2ps_pbh(_mm512_loadu_ps(p + 16), _mm512_loadu_ps(p));
+}
+template <> inline __m256bh load(const float *p) {
+    return _mm512_cvtneps_pbh(_mm512_loadu_ps(p));
+}
+#endif
+
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // CONSTANTS
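On AVX2/AVX-512 without native BF16 instructions, the new load<> overloads widen bf16 to f32 by zero-extending each 16-bit value into the upper half of a 32-bit word. That is the scalar conversion below (a standalone sketch, independent of the intrinsics):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // bf16 is the upper 16 bits of an IEEE-754 binary32, so widening is a shift.
    static float bf16_to_f32(uint16_t h) {
        uint32_t bits = (uint32_t)h << 16;
        float f;
        std::memcpy(&f, &bits, sizeof(f));
        return f;
    }

    int main() {
        std::printf("%f\n", bf16_to_f32(0x3F80)); // 0x3F80 encodes 1.0f
        return 0;
    }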
@@ -252,199 +290,170 @@ static const __m128i iq4nlt = _mm_loadu_si128((const __m128i *) kvalues_iq4nl);
 ////////////////////////////////////////////////////////////////////////////////////////////////////
 // FLOATING POINT MATRIX MULTIPLICATION

+template <int M>
+static inline int64_t BLOCK_SIZE(size_t m) {
+    const int64_t NB_BLOC_M = (m + M - 1) / M;
+    return (m % NB_BLOC_M == 0) ? m / NB_BLOC_M : (m / NB_BLOC_M) + 1;
+}
+
+static constexpr inline int64_t BLOC_POS(int64_t ib, int64_t ibN, int64_t bloc_size) {
+    return ib < ibN ? ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1);
+}
+
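BLOCK_SIZE<M>(m) splits m into ceil(m/M) nearly equal blocks and returns the rounded-up block size; BLOC_POS maps a block index to its start when the first ibN blocks get the full size and the rest get one less. A worked example with made-up sizes (helpers copied from the patch for illustration):

    #include <cassert>
    #include <cstdint>

    template <int M>
    static inline int64_t BLOCK_SIZE(size_t m) {
        const int64_t NB_BLOC_M = (m + M - 1) / M;   // number of blocks
        return (m % NB_BLOC_M == 0) ? m / NB_BLOC_M : (m / NB_BLOC_M) + 1;
    }

    static constexpr inline int64_t BLOC_POS(int64_t ib, int64_t ibN, int64_t bloc_size) {
        return ib < ibN ? ib * bloc_size : ibN * bloc_size + (ib - ibN) * (bloc_size - 1);
    }

    int main() {
        assert(BLOCK_SIZE<6>(20) == 5);  // 20 columns -> 4 blocks of 5
        assert(BLOCK_SIZE<6>(22) == 6);  // 22 columns -> 4 blocks, sizes 6,6,5,5
        assert(BLOC_POS(0, 2, 6) ==  0); // two full-size blocks first...
        assert(BLOC_POS(1, 2, 6) ==  6);
        assert(BLOC_POS(2, 2, 6) == 12); // ...then the size-5 blocks
        assert(BLOC_POS(3, 2, 6) == 17);
        return 0;
    }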
 template <int KN, typename D, typename V, typename TA, typename TB, typename TC>
 class tinyBLAS {
   public:
-    tinyBLAS(int64_t k,
+    tinyBLAS(const ggml_compute_params * params, int64_t k,
              const TA *A, int64_t lda,
              const TB *B, int64_t ldb,
-             TC *C, int64_t ldc,
-             int ith, int nth)
-        : A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc), ith(ith), nth(nth) {
+             TC *C, int64_t ldc)
+        : params(params), A(A), B(B), C(C), k(k), lda(lda), ldb(ldb), ldc(ldc) {
     }

-    void matmul(int64_t m, int64_t n) {
-        mnpack(0, m, 0, n);
-    }
+    bool matmul(int64_t m, int64_t n) {
+        if (k % KN != 0)
+            return false;
+        // compute RM for only need tile with size RM&RM-1
+#if VECTOR_REGISTERS == 32
+        if (m % 16 == 0 && (m/16 >= params->nth)) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 4>(m, n, SIZE_N, 12);
+            return true;
+        }
+        if (m % 8 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 2>(m, n, SIZE_N, 12);
+            return true;
+        }
+        if (m % 4 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<6>(n);
+            mnpack<4, 6, 1>(m, n, SIZE_N, 12);
+            return true;
+        }
+#else  // VECTOR_REGISTERS == 16
+        if (m % 16 == 0 && (m/16 >= params->nth)) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 4>(m, n, SIZE_N, 24);
+            return true;
+        }
+        if (m % 8 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 2>(m, n, SIZE_N, 24);
+            return true;
+        }
+        if (m % 4 == 0) {
+            const int64_t SIZE_N = BLOCK_SIZE<3>(n);
+            mnpack<4, 3, 1>(m, n, SIZE_N, 24);
+            return true;
+        }
+#endif
+        return false;
+    }
   private:
-    NOINLINE void mnpack(int64_t m0, int64_t m, int64_t n0, int64_t n) {
-        int64_t mc, nc, mp, np;
-        switch ((MIN(m - m0, 5) << 4) | MIN(n - n0, 5)) {
-#if VECTOR_REGISTERS == 32
-        case 0x55:
-            mc = 5;
-            nc = 5;
-            gemm<5, 5>(m0, m, n0, n);
-            break;
-        case 0x45:
-            mc = 4;
-            nc = 5;
-            gemm<4, 5>(m0, m, n0, n);
-            break;
-        case 0x54:
-            mc = 5;
-            nc = 4;
-            gemm<5, 4>(m0, m, n0, n);
-            break;
-        case 0x44:
-            mc = 4;
-            nc = 4;
-            gemm<4, 4>(m0, m, n0, n);
-            break;
-        case 0x53:
-            mc = 5;
-            nc = 3;
-            gemm<5, 3>(m0, m, n0, n);
-            break;
-        case 0x35:
-            mc = 3;
-            nc = 5;
-            gemm<3, 5>(m0, m, n0, n);
-            break;
-        case 0x43:
-            mc = 4;
-            nc = 3;
-            gemm<4, 3>(m0, m, n0, n);
-            break;
-#else
-        case 0x55:
-        case 0x54:
-        case 0x53:
-        case 0x45:
-        case 0x44:
-        case 0x43:
-            mc = 4;
-            nc = 3;
-            gemm<4, 3>(m0, m, n0, n);
-            break;
-        case 0x35:
-#endif
-        case 0x34:
-            mc = 3;
-            nc = 4;
-            gemm<3, 4>(m0, m, n0, n);
-            break;
-        case 0x52:
-            mc = 5;
-            nc = 2;
-            gemm<5, 2>(m0, m, n0, n);
-            break;
-        case 0x33:
-            mc = 3;
-            nc = 3;
-            gemm<3, 3>(m0, m, n0, n);
-            break;
-        case 0x25:
-            mc = 2;
-            nc = 5;
-            gemm<2, 5>(m0, m, n0, n);
-            break;
-        case 0x42:
-            mc = 4;
-            nc = 2;
-            gemm<4, 2>(m0, m, n0, n);
-            break;
-        case 0x24:
-            mc = 2;
-            nc = 4;
-            gemm<2, 4>(m0, m, n0, n);
-            break;
-        case 0x32:
-            mc = 3;
-            nc = 2;
-            gemm<3, 2>(m0, m, n0, n);
-            break;
-        case 0x23:
-            mc = 2;
-            nc = 3;
-            gemm<2, 3>(m0, m, n0, n);
-            break;
-        case 0x51:
-            mc = 5;
-            nc = 1;
-            gemm<5, 1>(m0, m, n0, n);
-            break;
-        case 0x41:
-            mc = 4;
-            nc = 1;
-            gemm<4, 1>(m0, m, n0, n);
-            break;
-        case 0x22:
-            mc = 2;
-            nc = 2;
-            gemm<2, 2>(m0, m, n0, n);
-            break;
-        case 0x15:
-            mc = 1;
-            nc = 5;
-            gemm<1, 5>(m0, m, n0, n);
-            break;
-        case 0x14:
-            mc = 1;
-            nc = 4;
-            gemm<1, 4>(m0, m, n0, n);
-            break;
-        case 0x31:
-            mc = 3;
-            nc = 1;
-            gemm<3, 1>(m0, m, n0, n);
-            break;
-        case 0x13:
-            mc = 1;
-            nc = 3;
-            gemm<1, 3>(m0, m, n0, n);
-            break;
-        case 0x21:
-            mc = 2;
-            nc = 1;
-            gemm<2, 1>(m0, m, n0, n);
-            break;
-        case 0x12:
-            mc = 1;
-            nc = 2;
-            gemm<1, 2>(m0, m, n0, n);
-            break;
-        case 0x11:
-            mc = 1;
-            nc = 1;
-            gemm<1, 1>(m0, m, n0, n);
-            break;
-        default:
-            return;
-        }
-        mp = m0 + (m - m0) / mc * mc;
-        np = n0 + (n - n0) / nc * nc;
-        mnpack(mp, m, n0, np);
-        mnpack(m0, m, np, n);
-    }
+    template <int RM, int RN, int BM>
+    inline void mnpack(int64_t m, int64_t n, int64_t SIZE_N, int64_t BN) {
+        if (SIZE_N == RN) {
+            return gemm<RM, RN, BM>(m, n, BN);
+        }
+        if constexpr (RN > 1) {
+            return mnpack<RM, RN-1, BM>(m, n, SIZE_N, BN);
+        } else {
+            GGML_LOG_ERROR("mnpack<%d, %d> bloc size not supported\n", RM, (int)SIZE_N);
+            GGML_ASSERT(false); // we have miss something.
+        }
+    }
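The new mnpack maps the runtime block width SIZE_N onto the compile-time RN by recursing on the template parameter until they match, with if constexpr stopping the recursion at RN == 1. The dispatch idiom in isolation (a generic sketch, not the sgemm code):

    #include <cstdio>

    // Dispatch a runtime value n onto a template parameter N by counting down.
    template <int N>
    void run(int n) {
        if (n == N) {
            std::printf("instantiated with N = %d\n", N);
            return;
        }
        if constexpr (N > 1) {
            return run<N - 1>(n);
        } else {
            std::printf("unsupported size %d\n", n);
        }
    }

    int main() {
        run<6>(4); // selects the N == 4 instantiation at runtime
        return 0;
    }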
     template <int RM, int RN>
-    NOINLINE void gemm(int64_t m0, int64_t m, int64_t n0, int64_t n) {
-        int64_t ytiles = (m - m0) / RM;
-        int64_t xtiles = (n - n0) / RN;
-        int64_t tiles = xtiles * ytiles;
-        int64_t duty = (tiles + nth - 1) / nth;
-        int64_t start = duty * ith;
-        int64_t end = start + duty;
-        if (end > tiles)
-            end = tiles;
-        for (int64_t job = start; job < end; ++job) {
-            int64_t ii = m0 + job / xtiles * RM;
-            int64_t jj = n0 + job % xtiles * RN;
-            D Cv[RN][RM] = {};
-            for (int64_t l = 0; l < k; l += KN)
-                for (int64_t j = 0; j < RN; ++j)
-                    for (int64_t i = 0; i < RM; ++i)
-                        Cv[j][i] = madd(load<V>(A + lda * (ii + i) + l),
-                                        load<V>(B + ldb * (jj + j) + l),
-                                        Cv[j][i]);
-            for (int64_t j = 0; j < RN; ++j)
-                for (int64_t i = 0; i < RM; ++i)
-                    C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
-        }
-    }
+    inline void gemm_bloc(int64_t ii, int64_t jj) {
+        D Cv[RN][RM] = {};
+        for (int64_t l = 0; l < k; l += KN) {
+            // help compiler for op order.
+            if constexpr (RM <= RN) {
+                V Av[RM];
+                for (int64_t i = 0; i < RM; ++i) {
+                    Av[i] = load<V>(A + lda * (ii + i) + l);
+                }
+                for (int64_t j = 0; j < RN; ++j) {
+                    V Bv = load<V>(B + ldb * (jj + j) + l);
+                    for (int64_t i = 0; i < RM; ++i) {
+                        Cv[j][i] = madd(Av[i], Bv, Cv[j][i]);
+                    }
+                }
+            } else {
+                V Bv[RN];
+                for (int64_t j = 0; j < RN; ++j) {
+                    Bv[j] = load<V>(B + ldb * (jj + j) + l);
+                }
+                for (int64_t i = 0; i < RM; ++i) {
+                    V Av = load<V>(A + lda * (ii + i) + l);
+                    for (int64_t j = 0; j < RN; ++j) {
+                        Cv[j][i] = madd(Av, Bv[j], Cv[j][i]);
+                    }
+                }
+            }
+        }
+        for (int64_t j = 0; j < RN; ++j)
+            for (int64_t i = 0; i < RM; ++i)
+                C[ldc * (jj + j) + (ii + i)] = hsum(Cv[j][i]);
+    }
+
+    template <int RM, int RN, int BM>
+    NOINLINE void gemm(int64_t m, int64_t n, int64_t BN) {
+        static std::atomic<int64_t> current_chunk;
+
+        GGML_ASSERT(m % (RM * BM) == 0);
+        const int64_t ytiles = m / (RM * BM);
+        const int64_t xtiles = (n + RN -1) / RN;
+        const int64_t jj_RN = (xtiles - (xtiles * RN - n));
+
+        // "round" bloc_size to "nearest" BN
+        const int64_t NB_BN = xtiles < BN ? 1 : (xtiles + BN / 2) / BN;
+        const int64_t SIZE_BN = xtiles % NB_BN == 0 ? xtiles / NB_BN : xtiles / NB_BN + 1;
+        const int64_t jj_BN = (NB_BN - (NB_BN * SIZE_BN - xtiles));
+        const int64_t nb_job = ytiles * NB_BN;
+
+        if (params->ith == 0) {
+            GGML_ASSERT( jj_BN * SIZE_BN + (NB_BN - jj_BN) * (SIZE_BN - 1) == xtiles);
+            // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start.
+            std::atomic_store_explicit(&current_chunk, (int64_t)params->nth, std::memory_order_relaxed);
+        }
+
+        ggml_barrier(params->threadpool);
+
+        int64_t job = params->ith;
+        while (job < nb_job) {
+            const int64_t ii = (job % ytiles) * RM * BM;
+            const int64_t jb = job / ytiles;
+            const int64_t jr0 = BLOC_POS(jb  , jj_BN, SIZE_BN);
+            const int64_t jrN = BLOC_POS(jb+1, jj_BN, SIZE_BN);
+
+            const int64_t jj0 = BLOC_POS(jr0, jj_RN, RN);
+            const int64_t jj2 = BLOC_POS(jrN, jj_RN, RN);
+            const int64_t jj1 = jj2 < jj_RN * RN ? jj2 : jj_RN * RN;
+
+            for (int64_t bi = 0; bi < BM * RM; bi += RM) {
+                int64_t jj = jj0;
+                for (; jj < jj1; jj += RN) {
+                    gemm_bloc<RM, RN>(ii + bi, jj);
+                }
+                if constexpr (RN > 1) {
+                    for (; jj < jj2; jj += RN - 1) {
+                        gemm_bloc<RM, RN-1>(ii + bi, jj);
+                    }
+                }
+                GGML_ASSERT(jj == jj2);
+            }
+
+            // next step.
+            job = std::atomic_fetch_add_explicit(&current_chunk, (int64_t)1, std::memory_order_relaxed);
+        }
+
+        ggml_barrier(params->threadpool);
+        return;
+    }
+
+    const ggml_compute_params * params;
     const TA *const A;
     const TB *const B;
     TC *const C;
@@ -452,8 +461,6 @@ class tinyBLAS {
     const int64_t lda;
     const int64_t ldb;
     const int64_t ldc;
-    const int ith;
-    const int nth;
 };

 //////////////////////////////////////////////////////////////////////////////////////////
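Instead of statically slicing the tile range per thread (the old duty/start/end math), the new gemm hands out chunks through a shared atomic counter: thread ith starts on chunk ith, and every finished thread claims the next unprocessed chunk with fetch_add. The scheduling pattern on its own, using plain std::thread in place of ggml's threadpool and barrier (an illustrative sketch):

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    int main() {
        const int     nth    = 4;   // worker threads
        const int64_t nb_job = 32;  // chunks of work
        std::atomic<int64_t> current_chunk{nth}; // first unclaimed chunk after the initial round

        auto worker = [&](int ith) {
            int64_t job = ith; // every thread starts on its own index
            while (job < nb_job) {
                // ... process chunk `job` here ...
                job = std::atomic_fetch_add_explicit(&current_chunk, (int64_t)1, std::memory_order_relaxed);
            }
        };

        std::vector<std::thread> pool;
        for (int i = 0; i < nth; ++i) pool.emplace_back(worker, i);
        for (auto & t : pool) t.join();
        std::printf("processed %lld chunks on %d threads\n", (long long)nb_job, nth);
        return 0;
    }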
@@ -1657,8 +1664,9 @@ class tinyBLAS_PPC {
  * @param Ctype is GGML data type of `C`
  * @return true if this function was able to service the matmul request
  */
-bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
-                     int64_t ldc, int ith, int nth, int Atype, int Btype, int Ctype) {
+bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t m, int64_t n, int64_t k,
+                     const void *A, int64_t lda, const void *B, int64_t ldb, void *C,
+                     int64_t ldc, int Atype, int Btype, int Ctype) {

     assert(m >= 0);
     assert(n >= 0);
@@ -1666,8 +1674,8 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
     assert(lda >= k);
     assert(ldb >= k);
     assert(ldc >= m);
-    assert(nth > 0);
-    assert(ith < nth);
+    assert(params->nth > 0);
+    assert(params->ith < params->nth);

     // only enable sgemm for prompt processing
     if (n < 2)
@@ -1682,37 +1690,25 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
         if (Btype != GGML_TYPE_F32)
             return false;
 #if defined(__AVX512F__)
-        if (k % 16)
-            return false;
-        tinyBLAS<16, __m512, __m512, float, float, float> tb{
+        tinyBLAS<16, __m512, __m512, float, float, float> tb{ params,
             k, (const float *)A, lda,
             (const float *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
+            (float *)C, ldc};
+        return tb.matmul(m, n);
 #elif defined(__AVX__) || defined(__AVX2__)
-        if (k % 8)
-            return false;
-        tinyBLAS<8, __m256, __m256, float, float, float> tb{
+        tinyBLAS<8, __m256, __m256, float, float, float> tb{ params,
            k, (const float *)A, lda,
            (const float *)B, ldb,
-           (float *)C, ldc,
-           ith, nth};
-        tb.matmul(m, n);
-        return true;
+           (float *)C, ldc};
+        return tb.matmul(m, n);
 #elif defined(__ARM_NEON)
         if (n < 4)
             return false;
-        if (k % 4)
-            return false;
-        tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{
+        tinyBLAS<4, float32x4_t, float32x4_t, float, float, float> tb{ params,
             k, (const float *)A, lda,
             (const float *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
+            (float *)C, ldc};
+        return tb.matmul(m, n);
 #elif defined(__MMA__)
         if (k % 8)
             return false;
@@ -1720,7 +1716,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const float *)A, lda,
             (const float *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #else
@@ -1728,60 +1724,71 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
 #endif
     }

+    case GGML_TYPE_BF16: {
+#if defined(__AVX512BF16__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<32, __m512, __m512bh, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__AVX512F__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<16, __m512, __m512, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#elif defined(__AVX2__)
+        if (Btype == GGML_TYPE_BF16) {
+            tinyBLAS<8, __m256, __m256, ggml_bf16_t, ggml_bf16_t, float> tb{ params, k,
+                (const ggml_bf16_t *)A, lda,
+                (const ggml_bf16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
+#endif
+        return false;
+    }
     case GGML_TYPE_F16: {
 #if defined(__AVX512F__)
-        if (k % 16)
-            return false;
-        if (Btype != GGML_TYPE_F32)
-            return false;
-        tinyBLAS<16, __m512, __m512, ggml_fp16_t, float, float> tb{
-            k, (const ggml_fp16_t *)A, lda,
-            (const float *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<16, __m512, __m512, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k,
+                (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
 #elif (defined(__AVX__) || defined(__AVX2__)) && defined(__F16C__)
-        if (k % 8)
-            return false;
-        if (Btype != GGML_TYPE_F32)
-            return false;
-        tinyBLAS<8, __m256, __m256, ggml_fp16_t, float, float> tb{
-            k, (const ggml_fp16_t *)A, lda,
-            (const float *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<8, __m256, __m256, ggml_fp16_t, ggml_fp16_t, float> tb{ params, k,
+                (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
 #elif defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && !defined(_MSC_VER)
         if (n < 8)
             return false;
-        if (k % 8)
-            return false;
-        if (Btype != GGML_TYPE_F16)
-            return false;
-        tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{
-            k, (const ggml_fp16_t *)A, lda,
-            (const ggml_fp16_t *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
+        if (Btype == GGML_TYPE_F16) {
+            tinyBLAS<8, float16x8_t, float16x8_t, ggml_fp16_t, ggml_fp16_t, float> tb{ params,
+                k, (const ggml_fp16_t *)A, lda,
+                (const ggml_fp16_t *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
 #elif defined(__ARM_NEON) && !defined(_MSC_VER)
-        if (k % 4)
-            return false;
-        if (Btype != GGML_TYPE_F32)
-            return false;
-        tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{
-            k, (const ggml_fp16_t *)A, lda,
-            (const float *)B, ldb,
-            (float *)C, ldc,
-            ith, nth};
-        tb.matmul(m, n);
-        return true;
-#else
-        return false;
+        if (Btype == GGML_TYPE_F32) {
+            tinyBLAS<4, float32x4_t, float32x4_t, ggml_fp16_t, float, float> tb{ params,
+                k, (const ggml_fp16_t *)A, lda,
+                (const float *)B, ldb,
+                (float *)C, ldc};
+            return tb.matmul(m, n);
+        }
 #endif
+        return false;
     }

     case GGML_TYPE_Q8_0: {
@@ -1792,7 +1799,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_q8_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #elif defined(__ARM_FEATURE_DOTPROD)
@@ -1800,7 +1807,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_q8_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #else
@@ -1816,7 +1823,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_q4_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #elif defined(__ARM_FEATURE_DOTPROD)
@@ -1824,7 +1831,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_q4_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #else
@@ -1840,7 +1847,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_q5_0 *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #else
@@ -1856,7 +1863,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
             k, (const block_iq4_nl *)A, lda,
             (const block_q8_0 *)B, ldb,
             (float *)C, ldc,
-            ith, nth};
+            params->ith, params->nth};
         tb.matmul(m, n);
         return true;
 #else
@@ -1868,6 +1875,7 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
         return false;
     }

+    (void)params;
     (void)m;
     (void)n;
     (void)k;
@@ -1877,8 +1885,6 @@ bool llamafile_sgemm(int64_t m, int64_t n, int64_t k, const void *A, int64_t lda
     (void)ldb;
     (void)C;
     (void)ldc;
-    (void)ith;
-    (void)nth;
     (void)Atype;
     (void)Btype;
     (void)Ctype;

File: ggml/src/ggml-cpu/llamafile/sgemm.h

@@ -5,8 +5,8 @@
 extern "C" {
 #endif

-bool llamafile_sgemm(int64_t, int64_t, int64_t, const void *, int64_t,
-                     const void *, int64_t, void *, int64_t, int, int,
-                     int, int, int);
+bool llamafile_sgemm(const struct ggml_compute_params * params, int64_t, int64_t, int64_t,
+                     const void *, int64_t, const void *, int64_t, void *, int64_t,
+                     int, int, int);

 #ifdef __cplusplus

File: scripts/compare-llama-bench.py

@@ -126,6 +126,8 @@ connection = sqlite3.connect(input_file)
 cursor = connection.cursor()
 builds = cursor.execute("SELECT DISTINCT build_commit FROM test;").fetchall()

+commit_short_len = len(builds[0][0])
+
 try:
     repo = git.Repo(".", search_parent_directories=True)
 except git.InvalidGitRepositoryError:
@@ -138,11 +140,11 @@ def find_parent_in_data(commit: git.Commit):
     seen_hexsha8 = set()
     while heap:
         depth, current_commit = heapq.heappop(heap)
-        current_hexsha8 = commit.hexsha[:8]
+        current_hexsha8 = commit.hexsha[:commit_short_len]
         if (current_hexsha8,) in builds:
             return current_hexsha8
         for parent in commit.parents:
-            parent_hexsha8 = parent.hexsha[:8]
+            parent_hexsha8 = parent.hexsha[:commit_short_len]
             if parent_hexsha8 not in seen_hexsha8:
                 seen_hexsha8.add(parent_hexsha8)
                 heapq.heappush(heap, (depth + 1, parent))
@@ -156,9 +158,9 @@ def get_all_parent_hexsha8s(commit: git.Commit):
     while unvisited:
         current_commit = unvisited.pop(0)
-        visited.append(current_commit.hexsha[:8])
+        visited.append(current_commit.hexsha[:commit_short_len])
         for parent in current_commit.parents:
-            if parent.hexsha[:8] not in visited:
+            if parent.hexsha[:commit_short_len] not in visited:
                 unvisited.append(parent)

     return visited
@@ -169,10 +171,10 @@ def get_commit_name(hexsha8):
     if repo is None:
         return hexsha8
     for h in repo.heads:
-        if h.commit.hexsha[:8] == hexsha8:
+        if h.commit.hexsha[:commit_short_len] == hexsha8:
             return h.name
     for t in repo.tags:
-        if t.commit.hexsha[:8] == hexsha8:
+        if t.commit.hexsha[:commit_short_len] == hexsha8:
             return t.name
     return hexsha8
@@ -183,13 +185,13 @@ def get_commit_hexsha8(name):
         return None
     for h in repo.heads:
         if h.name == name:
-            return h.commit.hexsha[:8]
+            return h.commit.hexsha[:commit_short_len]
     for t in repo.tags:
         if t.name == name:
-            return t.commit.hexsha[:8]
+            return t.commit.hexsha[:commit_short_len]
     for c in repo.iter_commits("--all"):
-        if c.hexsha[:8] == name[:8]:
-            return c.hexsha[:8]
+        if c.hexsha[:commit_short_len] == name[:commit_short_len]:
+            return c.hexsha[:commit_short_len]
     return None

File: src/llama-vocab.cpp

@@ -1657,7 +1657,7 @@ bool llama_token_is_control_impl(const struct llama_vocab & vocab, llama_token t
 }

 llama_token llama_token_bos_impl(const struct llama_vocab & vocab) {
-    return vocab.special_bos_id;
+    return vocab.type != LLAMA_VOCAB_TYPE_WPM ? vocab.special_bos_id : vocab.special_cls_id;
 }

 llama_token llama_token_eos_impl(const struct llama_vocab & vocab) {

File: src/llama-vocab.h

@@ -45,7 +45,7 @@ struct llama_vocab {
     id special_unk_id  = 0;
     id special_sep_id  = LLAMA_TOKEN_NULL;
     id special_pad_id  = LLAMA_TOKEN_NULL;
-    id special_cls_id  = LLAMA_TOKEN_NULL;
+    id special_cls_id  = LLAMA_TOKEN_NULL; // TODO: revisit if this is really needed https://github.com/ggerganov/llama.cpp/pull/10930
     id special_mask_id = LLAMA_TOKEN_NULL;

     id linefeed_id = 13;