mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-12-27 03:44:35 +00:00
log : cleanup, comments, build flags
Some checks failed
flake8 Lint / Lint (push) Has been cancelled
ggml-ci
This commit is contained in:
parent 13226dc83e
commit 40638f7136
Makefile (24 changes)
@@ -149,6 +149,14 @@ GGML_NO_METAL := 1
 DEPRECATE_WARNING := 1
 endif
 
+ifdef LLAMA_DISABLE_LOGS
+REMOVE_WARNING := 1
+endif
+
+ifdef LLAMA_SERVER_VERBOSE
+REMOVE_WARNING := 1
+endif
+
 ifndef UNAME_S
 UNAME_S := $(shell uname -s)
 endif
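The two blocks added here only raise a flag: defining either removed option (for example, invoking make LLAMA_DISABLE_LOGS=1) sets REMOVE_WARNING, and the actual notice is emitted by the matching block added near the end of the Makefile (see the @@ -1029,6 +1029,14 @@ hunk below).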
@@ -352,19 +360,11 @@ ifdef LLAMA_SANITIZE_UNDEFINED
 	MK_LDFLAGS += -fsanitize=undefined -g
 endif
 
-ifdef LLAMA_SERVER_VERBOSE
-	MK_CPPFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
-endif
-
 ifdef LLAMA_SERVER_SSL
 	MK_CPPFLAGS += -DCPPHTTPLIB_OPENSSL_SUPPORT
 	MK_LDFLAGS += -lssl -lcrypto
 endif
 
-ifdef LLAMA_DISABLE_LOGS
-	MK_CPPFLAGS += -DLOG_DISABLE_LOGS
-endif # LLAMA_DISABLE_LOGS
-
 # warnings
 WARN_FLAGS = \
 	-Wall \
@@ -1029,6 +1029,14 @@ $(info - LLAMA_NO_CCACHE)
 $(info )
 endif
 
+ifdef REMOVE_WARNING
+$(info !!! REMOVAL WARNING !!!)
+$(info The following LLAMA_ options have been removed and are no longer supported)
+$(info - LLAMA_DISABLE_LOGS (https://github.com/ggerganov/llama.cpp/pull/9418))
+$(info - LLAMA_SERVER_VERBOSE (https://github.com/ggerganov/llama.cpp/pull/9418))
+$(info )
+endif
+
 #
 # Build libraries
 #
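Since $(info ...) only prints and never aborts, a build started with one of the removed flags still runs to completion; the notice points users at the PR that removed the option instead of silently ignoring the stale flag.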
common/common.h
@@ -341,7 +341,8 @@ struct gpt_params {
 bool batched_bench_output_jsonl = false;
 };
 
-// call once at the start of a program using common
+// call once at the start of a program if it uses libcommon
+// initializes the logging system and prints info about the build
 void gpt_init();
 
 std::string gpt_params_get_system_info(const gpt_params & params);
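A minimal usage sketch of the documented contract; gpt_init() and the two comment lines come from this diff, while the main() scaffolding below is illustrative only:

    #include "common.h" // declares gpt_init()

    int main(int argc, char ** argv) {
        // per the new comments: call once, at program start, before other
        // libcommon facilities are used; initializes the logging system
        // and prints info about the build
        gpt_init();

        // ... parse params, load the model, etc ...
        return 0;
    }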
common/log.cpp
@@ -69,8 +69,8 @@ struct gpt_log_entry {
     void print(FILE * file = nullptr) const {
         FILE * fcur = file;
         if (!fcur) {
-            // stderr displays DBG messages only when the verbosity is high
-            // these messages can still be logged to a file
+            // stderr displays DBG messages only when their verbosity level is not higher than the threshold
+            // these messages will still be logged to a file
             if (level == GGML_LOG_LEVEL_DEBUG && gpt_log_verbosity_thold < LOG_DEFAULT_DEBUG) {
                 return;
             }
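Note that the gate sits inside the if (!fcur) branch: the early return applies only when print() has to pick a default stream itself. When a log file has been configured and print() is invoked with that file, the check is bypassed, which is what the reworded second comment ("will still be logged to a file") promises.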
common/log.h (11 changes)
@@ -1,6 +1,6 @@
 #pragma once
 
-#include "ggml.h"
+#include "ggml.h" // for ggml_log_level
 
 #ifndef __GNUC__
 #    define LOG_ATTRIBUTE_FORMAT(...)
@@ -48,7 +48,10 @@ void gpt_log_add(struct gpt_log * log, enum ggml_log_level level, const char * f
 // 0.00.090.578 I llm_load_tensors: offloading 32 repeating layers to GPU
 // 0.00.090.579 I llm_load_tensors: offloading non-repeating layers to GPU
 //
-// I - info, W - warning, E - error, D - debug
+// I - info    (stdout, V = 0)
+// W - warning (stderr, V = 0)
+// E - error   (stderr, V = 0)
+// D - debug   (stderr, V = LOG_DEFAULT_DEBUG)
 //
 
 void gpt_log_set_file (struct gpt_log * log, const char * file); // not thread-safe
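As a quick illustration of the table above (the messages and variables are invented; the LOG_* helper names are the ones used throughout the codebase, e.g. in the clip.cpp hunk further down):

    LOG_INF("loaded %d tensors\n", n_tensors); // I: stdout, visible at V = 0
    LOG_WRN("falling back to CPU\n");          // W: stderr, visible at V = 0
    LOG_ERR("failed to open '%s'\n", fname);   // E: stderr, visible at V = 0
    LOG_DBG("batch size = %d\n", n_batch);     // D: stderr, visible only once the
                                               //    verbosity threshold reaches
                                               //    LOG_DEFAULT_DEBUG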
@@ -57,13 +60,13 @@ void gpt_log_set_prefix (struct gpt_log * log, bool prefix); // w
 void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps); // whether to output timestamps in the prefix
 
 // helper macros for logging
-// use these to avoid computing log arguments if the verbosity is lower than the threshold
+// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
 //
 // for example:
 //
 //   LOG_DBG("this is a debug message: %d\n", expensive_function());
 //
-// this will avoid calling expensive_function() if the verbosity is lower than LOG_DEFAULT_DEBUG
+// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > gpt_log_verbosity_thold
 //
 
 #define LOG_TMPL(level, verbosity, ...) \
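A self-contained sketch of the guarding idea (a simplification for illustration, not the actual LOG_TMPL definition that follows): because the verbosity check wraps the whole statement, the argument expressions are never evaluated when the message is filtered out.

    #include <cstdio>

    static int my_verbosity_thold = 0; // stand-in for gpt_log_verbosity_thold
    #define MY_DEBUG_LEVEL 1           // stand-in for LOG_DEFAULT_DEBUG

    // simplified guard: __VA_ARGS__ is evaluated only when the branch is taken
    #define MY_LOG_DBG(...)                             \
        do {                                            \
            if (MY_DEBUG_LEVEL <= my_verbosity_thold) { \
                fprintf(stderr, __VA_ARGS__);           \
            }                                           \
        } while (0)

    static int expensive_function() {
        fprintf(stderr, "expensive_function() ran\n");
        return 42;
    }

    int main() {
        // MY_DEBUG_LEVEL > my_verbosity_thold, so the branch is not taken
        // and expensive_function() is never called
        MY_LOG_DBG("this is a debug message: %d\n", expensive_function());
        return 0;
    }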
examples/llava/clip.cpp
@@ -42,7 +42,7 @@
 #define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
 #define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
 #define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
-#define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
+#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
 
 //#define CLIP_DEBUG_FUNCTIONS
 
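The one-character fix above aligns clip.cpp's fallback macros with the convention documented in common/log.h: debug output belongs on stderr. A tiny illustration of why this matters (hypothetical messages): with LOG_DBG on stdout, redirecting a tool's output would capture debug noise along with the real results.

    #include <cstdio>

    int main() {
        fprintf(stdout, "result: 42\n");     // program output; safe to pipe
        fprintf(stderr, "debug: 3 steps\n"); // diagnostics; still visible on the
                                             // terminal after ./prog > out.txt
        return 0;
    }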
examples/server/CMakeLists.txt
@@ -1,5 +1,4 @@
 set(TARGET llama-server)
-option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
 option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
@@ -45,9 +45,6 @@ endforeach()
 
 add_executable(${TARGET} ${TARGET_SRCS})
 install(TARGETS ${TARGET} RUNTIME)
-target_compile_definitions(${TARGET} PRIVATE
-    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
-)
 
 target_link_libraries(${TARGET} PRIVATE common ${CMAKE_THREAD_LIBS_INIT})
 
examples/server/server.cpp
@@ -2077,13 +2077,6 @@ struct server_context {
                 slot_npast++;
             }
 
-            //LOG_VERBOSE("prompt processing progress", {
-            //    {"id_slot", slot.id},
-            //    {"n_past", slot.n_past},
-            //    {"n_ctx", n_ctx},
-            //    {"n_tokens", batch.n_tokens},
-            //    {"progress", (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens},
-            //});
             SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n", slot.n_past, batch.n_tokens, (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);
 
             // entire prompt has been processed
@@ -2098,12 +2091,6 @@ struct server_context {
                 slot.n_decoded = 0;
                 slot.i_batch = batch.n_tokens - 1;
 
-                //LOG_VERBOSE("prompt done", {
-                //    {"id_slot", slot.id},
-                //    {"n_past", slot.n_past},
-                //    {"n_ctx", n_ctx},
-                //    {"n_tokens", batch.n_tokens},
-                //});
                 SLT_INF(slot, "prompt done, n_past = %d, n_tokens = %d\n", slot.n_past, batch.n_tokens);
             }
         }