Mirror of https://github.com/ggerganov/llama.cpp.git
Commit 45a55b91aa: llama : better replace_all (cont); code : deduplicate replace_all
42 lines · 1.2 KiB · C++
#pragma once

// expose the internal API in llama.h (the define must precede the include)
#define LLAMA_API_INTERNAL

#include "llama.h"

#include <string>

#ifdef __GNUC__
#ifdef __MINGW32__
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(gnu_printf, __VA_ARGS__)))
#else
#define LLAMA_ATTRIBUTE_FORMAT(...) __attribute__((format(printf, __VA_ARGS__)))
#endif
#else
#define LLAMA_ATTRIBUTE_FORMAT(...)
#endif

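// LLAMA_ATTRIBUTE_FORMAT(fmt, args) marks a declaration so that GCC/Clang
// type-check its variadic arguments against a printf-style format string:
// 'fmt' is the 1-based index of the format-string parameter and 'args' the
// index of the first value argument. MinGW builds use gnu_printf so the
// check follows the GNU format dialect rather than the Microsoft one.
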
//
// logging
//

LLAMA_ATTRIBUTE_FORMAT(2, 3)
void llama_log_internal        (ggml_log_level level, const char * format, ...);
void llama_log_callback_default(ggml_log_level level, const char * text, void * user_data);

#define LLAMA_LOG_INFO(...)  llama_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define LLAMA_LOG_WARN(...)  llama_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define LLAMA_LOG_ERROR(...) llama_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)

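// Usage sketch (illustrative; 'n_tensors' is a hypothetical variable, not
// part of this header):
//
//   LLAMA_LOG_INFO("%s: loaded %d tensors\n", __func__, n_tensors);
//
// With LLAMA_ATTRIBUTE_FORMAT in effect, a mismatch between the format
// string and the arguments of such a call is diagnosed at compile time.
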
//
// helpers
//

static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
    if (search.empty()) {
        return; // Avoid infinite loop if 'search' is an empty string
    }
    size_t pos = 0;
    while ((pos = s.find(search, pos)) != std::string::npos) {
        s.replace(pos, search.length(), replace);
        pos += replace.length(); // advance past the inserted text so it is not rescanned
    }
}

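// Usage sketch for replace_all (illustrative, not part of the original header):
//
//   std::string s = "a--b";
//   replace_all(s, "--", "---"); // s == "a---b"
//
// Because 'pos' advances past each inserted replacement, a replacement that
// itself contains the search string (as above) is never rescanned, so the
// loop terminates.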