mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-27 03:44:35 +00:00)
commit 808d434901
parent 1605605b54

    fixes
--- a/Makefile
+++ b/Makefile
@@ -251,7 +251,7 @@ endif
 #
 
 # keep standard at C11 and C++11
-MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon
+MK_CPPFLAGS = -Iggml/include -Iggml/src -Iinclude -Isrc -Icommon -DGGML_USE_CPU
 MK_CFLAGS = -std=c11 -fPIC
 MK_CXXFLAGS = -std=c++11 -fPIC
 MK_NVCCFLAGS = -std=c++11
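The new -DGGML_USE_CPU define is what lets the backend registry below compile the CPU backend in statically instead of loading it as a dynamic library. A minimal sketch of that gating pattern, assuming the public ggml headers; example_registry is a hypothetical stand-in for the real ggml_backend_registry:

#include "ggml-backend.h"
#ifdef GGML_USE_CPU
#    include "ggml-cpu.h"
#endif
#include <vector>

// Hypothetical stand-in showing how GGML_USE_CPU gates static registration.
struct example_registry {
    std::vector<ggml_backend_reg_t> backends;

    example_registry() {
#ifdef GGML_USE_CPU
        // Built with -DGGML_USE_CPU: the CPU backend is linked in directly.
        backends.push_back(ggml_backend_cpu_reg());
#endif
        // Without the define, backends arrive only via ggml_backend_load().
    }
};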
--- a/examples/eval-callback/CMakeLists.txt
+++ b/examples/eval-callback/CMakeLists.txt
@@ -5,5 +5,7 @@ target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
 target_compile_features(${TARGET} PRIVATE cxx_std_11)
 
 set(TEST_TARGET test-eval-callback)
-add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0)
+add_test(NAME ${TEST_TARGET}
+         COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0
+         WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/../../) # HACK for dl backends
 set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl)
--- a/ggml/src/ggml-backend-reg.cpp
+++ b/ggml/src/ggml-backend-reg.cpp
@@ -5,6 +5,17 @@
 #include <cstring>
 #include <vector>
 
+#ifdef _WIN32
+#    define WIN32_LEAN_AND_MEAN
+#    ifndef NOMINMAX
+#        define NOMINMAX
+#    endif
+#    include <windows.h>
+#else
+#    include <dlfcn.h>
+#endif
+
+
 // Backend registry
 #ifdef GGML_USE_CPU
 #include "ggml-cpu.h"
@@ -90,7 +101,8 @@ struct ggml_backend_registry {
 
     ~ggml_backend_registry() {
         while (!backends.empty()) {
-            ggml_backend_unload(backends.back().reg);
+            // use silent since the log system may have been destroyed at this point
+            unload_backend(backends.back().reg, true);
         }
     }
 
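The switch to a silent unload in the destructor avoids calling the logger while static objects are being torn down: the registry is itself a static, and destruction order across translation units is unspecified, so the log system may be gone first. A self-contained sketch of the hazard and the fix; the types here are hypothetical illustrations, not the ggml ones:

#include <cstdio>

// Hypothetical illustration: a static registry must not log from its
// destructor, because the logging subsystem (another static) may already
// have been destroyed by the time this destructor runs.
struct registry {
    void unload_all(bool silent) {
        if (!silent) {
            std::printf("unloading backend\n");  // stand-in for GGML_LOG_DEBUG
        }
        // ... close handles, erase entries ...
    }
    ~registry() {
        unload_all(/*silent=*/true);  // silent: never touch the logger here
    }
};

static registry g_reg;  // destroyed after main() returns
int main() { return 0; }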
@@ -115,6 +127,43 @@ struct ggml_backend_registry {
 #endif
         devices.push_back(device);
     }
 
+    void unload_backend(ggml_backend_reg_t reg, bool silent) {
+        if (!silent) {
+            GGML_LOG_INFO("%s: unloading %s backend\n", __func__, ggml_backend_reg_name(reg));
+        }
+        auto it = std::find_if(backends.begin(), backends.end(),
+            [reg](ggml_backend_reg_entry entry) { return entry.reg == reg; });
+
+        if (it == backends.end()) {
+            if (!silent) {
+                GGML_LOG_ERROR("%s: backend not found\n", __func__);
+            }
+            return;
+        }
+
+        if (!silent) {
+            GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, ggml_backend_reg_name(reg));
+        }
+
+        // remove devices
+        devices.erase(
+            std::remove_if(devices.begin(), devices.end(),
+                [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }),
+            devices.end());
+
+        // unload library
+        if (it->handle) {
+#ifdef _WIN32
+            FreeLibrary((HMODULE) it->handle);
+#else
+            dlclose(it->handle);
+#endif
+        }
+
+        // remove backend
+        backends.erase(it);
+    }
 };
 
 static ggml_backend_registry & get_reg() {
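unload_backend() drops every device owned by the unloaded backend with the standard erase-remove idiom. A generic, self-contained illustration of that pattern, using plain ints in place of the ggml device and registry types:

#include <algorithm>
#include <vector>

int main() {
    std::vector<int> devices = {1, 2, 3, 2, 4};
    int reg = 2;  // stand-in for the backend being unloaded
    // remove_if compacts the kept elements to the front; erase trims the tail.
    devices.erase(
        std::remove_if(devices.begin(), devices.end(),
                       [reg](int dev) { return dev == reg; }),
        devices.end());
    // devices now holds {1, 3, 4}
    return 0;
}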
@@ -209,16 +258,6 @@ ggml_backend_t ggml_backend_init_best(void) {
     return ggml_backend_dev_init(dev, nullptr);
 }
 
-#ifdef _WIN32
-#    define WIN32_LEAN_AND_MEAN
-#    ifndef NOMINMAX
-#        define NOMINMAX
-#    endif
-#    include <windows.h>
-#else
-#    include <dlfcn.h>
-#endif
-
 typedef ggml_backend_reg_t (*ggml_backend_init_t)(void);
 
 ggml_backend_reg_t ggml_backend_load(const char * path) {
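The ggml_backend_init_t typedef is the signature of the entry point each backend shared library exports. A hedged, POSIX-only sketch of the load step; the "ggml_backend_init" symbol name and the dlopen flags are assumptions here, and error reporting is elided:

#include <dlfcn.h>

typedef struct ggml_backend_reg * ggml_backend_reg_t;
typedef ggml_backend_reg_t (*ggml_backend_init_t)(void);

// Sketch of what a loader like ggml_backend_load() does on POSIX: open the
// shared library, resolve its init entry point, and call it to obtain the
// backend registration. Symbol name and flags are assumptions.
ggml_backend_reg_t load_backend_posix(const char * path) {
    void * handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
    if (!handle) {
        return nullptr;
    }
    auto init = (ggml_backend_init_t) dlsym(handle, "ggml_backend_init");
    if (!init) {
        dlclose(handle);
        return nullptr;
    }
    return init();
}

int main() { return 0; }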
@@ -264,33 +303,7 @@ ggml_backend_reg_t ggml_backend_load(const char * path) {
 }
 
 void ggml_backend_unload(ggml_backend_reg_t reg) {
-    auto it = std::find_if(get_reg().backends.begin(), get_reg().backends.end(),
-        [reg](ggml_backend_reg_entry entry) { return entry.reg == reg; });
-
-    if (it == get_reg().backends.end()) {
-        GGML_LOG_ERROR("%s: backend not found\n", __func__);
-        return;
-    }
-
-    GGML_LOG_DEBUG("%s: unloading %s backend\n", __func__, ggml_backend_reg_name(reg));
-
-    // remove devices
-    get_reg().devices.erase(
-        std::remove_if(get_reg().devices.begin(), get_reg().devices.end(),
-            [reg](ggml_backend_dev_t dev) { return ggml_backend_dev_backend_reg(dev) == reg; }),
-        get_reg().devices.end());
-
-    // unload library
-    if (it->handle) {
-#ifdef _WIN32
-        FreeLibrary((HMODULE) it->handle);
-#else
-        dlclose(it->handle);
-#endif
-    }
-
-    // remove backend
-    get_reg().backends.erase(it);
+    get_reg().unload_backend(reg, true);
 }
 
 void ggml_backend_load_all() {
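After this refactor, the public ggml_backend_load()/ggml_backend_unload() pair is a thin wrapper over the registry, with ggml_backend_unload() forwarding to unload_backend(reg, true). A hedged usage sketch of that public API; the library path is hypothetical and platform-dependent:

#include "ggml-backend.h"
#include <cstdio>

int main() {
    // Load a backend from a shared library. The actual file name depends on
    // the platform and build (e.g. .so vs .dll); this path is an assumption.
    ggml_backend_reg_t reg = ggml_backend_load("./libggml-cpu.so");
    if (!reg) {
        std::fprintf(stderr, "failed to load backend\n");
        return 1;
    }
    std::printf("loaded backend: %s\n", ggml_backend_reg_name(reg));

    // Removes the backend's devices, closes the dl handle, and erases the
    // registry entry, all inside the registry's unload_backend().
    ggml_backend_unload(reg);
    return 0;
}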
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -22209,7 +22209,7 @@ int llama_split_prefix(char * dest, size_t maxlen, const char * split_path, int
 const char * llama_print_system_info(void) {
     static std::string s;
 
-    for (int i = 0; i < ggml_backend_reg_count(); i++) {
+    for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
         auto * reg = ggml_backend_reg_get(i);
         auto * get_features_fn = (ggml_backend_get_features_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_get_features");
         if (get_features_fn) {
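The loop-counter change fixes a signed/unsigned comparison: ggml_backend_reg_count() returns size_t, so an int counter trips -Wsign-compare. A minimal stand-alone repro; count() is a hypothetical stand-in for the real function:

#include <cstddef>

// Hypothetical stand-in for ggml_backend_reg_count(), which returns size_t.
static size_t count() { return 3; }

int main() {
    // for (int i = 0; i < count(); i++) {}  // -Wsign-compare: int vs size_t
    for (size_t i = 0; i < count(); i++) {
        // counter type now matches the unsigned return type
    }
    return 0;
}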