log : cleanup, comments, build flags

ggml-ci
Georgi Gerganov 2024-09-13 21:55:11 +03:00
parent 13226dc83e
commit 40638f7136
7 changed files with 30 additions and 34 deletions

diff --git a/Makefile b/Makefile

@@ -149,6 +149,14 @@ GGML_NO_METAL := 1
 DEPRECATE_WARNING := 1
 endif
 
+ifdef LLAMA_DISABLE_LOGS
+REMOVE_WARNING := 1
+endif
+
+ifdef LLAMA_SERVER_VERBOSE
+REMOVE_WARNING := 1
+endif
+
 ifndef UNAME_S
 UNAME_S := $(shell uname -s)
 endif
@@ -352,19 +360,11 @@ ifdef LLAMA_SANITIZE_UNDEFINED
 MK_LDFLAGS += -fsanitize=undefined -g
 endif
 
-ifdef LLAMA_SERVER_VERBOSE
-MK_CPPFLAGS += -DSERVER_VERBOSE=$(LLAMA_SERVER_VERBOSE)
-endif
-
 ifdef LLAMA_SERVER_SSL
 MK_CPPFLAGS += -DCPPHTTPLIB_OPENSSL_SUPPORT
 MK_LDFLAGS += -lssl -lcrypto
 endif
 
-ifdef LLAMA_DISABLE_LOGS
-MK_CPPFLAGS += -DLOG_DISABLE_LOGS
-endif # LLAMA_DISABLE_LOGS
-
 # warnings
 WARN_FLAGS = \
 	-Wall \
@@ -1029,6 +1029,14 @@ $(info - LLAMA_NO_CCACHE)
 $(info )
 endif
 
+ifdef REMOVE_WARNING
+$(info !!! REMOVAL WARNING !!!)
+$(info The following LLAMA_ options have been removed and are no longer supported)
+$(info  - LLAMA_DISABLE_LOGS   (https://github.com/ggerganov/llama.cpp/pull/9418))
+$(info  - LLAMA_SERVER_VERBOSE (https://github.com/ggerganov/llama.cpp/pull/9418))
+$(info )
+endif
+
 #
 # Build libraries
 #

diff --git a/common/common.h b/common/common.h

@@ -341,7 +341,8 @@ struct gpt_params {
     bool batched_bench_output_jsonl = false;
 };
 
-// call once at the start of a program using common
+// call once at the start of a program if it uses libcommon
+// initializes the logging system and prints info about the build
 void gpt_init();
 
 std::string gpt_params_get_system_info(const gpt_params & params);
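
A minimal usage sketch of the two declarations above (not part of the diff), assuming only this header and the LOG_INF macro from common/log.h:

#include "common.h"
#include "log.h"

int main(int argc, char ** argv) {
    gpt_init(); // initialize logging and print build info before any other output

    gpt_params params;
    // ... parse CLI arguments into params ...

    LOG_INF("%s\n", gpt_params_get_system_info(params).c_str());

    return 0;
}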

diff --git a/common/log.cpp b/common/log.cpp

@@ -69,8 +69,8 @@ struct gpt_log_entry {
     void print(FILE * file = nullptr) const {
         FILE * fcur = file;
         if (!fcur) {
-            // stderr displays DBG messages only when the verbosity is high
-            // these messages can still be logged to a file
+            // stderr displays DBG messages only when their verbosity level is not higher than the threshold
+            // these messages will still be logged to a file
             if (level == GGML_LOG_LEVEL_DEBUG && gpt_log_verbosity_thold < LOG_DEFAULT_DEBUG) {
                 return;
             }
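
In other words, a debug entry is dropped on stderr at the default verbosity but still reaches a log file once one is set. A small sketch of that behavior; gpt_log_main() is assumed here as the accessor for the default log instance and is not shown in this diff:

#include "log.h"

int main() {
    gpt_log_set_file(gpt_log_main(), "debug.log"); // gpt_log_main() is an assumption

    LOG_DBG("n_past = %d\n", 32); // skipped on stderr while gpt_log_verbosity_thold < LOG_DEFAULT_DEBUG,
                                  // but still written to debug.log
    LOG_INF("done\n");            // info goes to stdout regardless

    return 0;
}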

diff --git a/common/log.h b/common/log.h

@@ -1,6 +1,6 @@
 #pragma once
 
-#include "ggml.h"
+#include "ggml.h" // for ggml_log_level
 
 #ifndef __GNUC__
 #    define LOG_ATTRIBUTE_FORMAT(...)
@@ -48,7 +48,10 @@ void gpt_log_add(struct gpt_log * log, enum ggml_log_level level, const char * f
 //  0.00.090.578 I llm_load_tensors: offloading 32 repeating layers to GPU
 //  0.00.090.579 I llm_load_tensors: offloading non-repeating layers to GPU
 //
-// I - info, W - warning, E - error, D - debug
+// I - info    (stdout, V = 0)
+// W - warning (stderr, V = 0)
+// E - error   (stderr, V = 0)
+// D - debug   (stderr, V = LOG_DEFAULT_DEBUG)
 //
 
 void gpt_log_set_file(struct gpt_log * log, const char * file); // not thread-safe
@@ -57,13 +60,13 @@ void gpt_log_set_prefix (struct gpt_log * log, bool prefix); // w
 void gpt_log_set_timestamps(struct gpt_log * log, bool timestamps); // whether to output timestamps in the prefix
 
 // helper macros for logging
-// use these to avoid computing log arguments if the verbosity is lower than the threshold
+// use these to avoid computing log arguments if the verbosity of the log is higher than the threshold
 //
 // for example:
 //
 //   LOG_DBG("this is a debug message: %d\n", expensive_function());
 //
-// this will avoid calling expensive_function() if the verbosity is lower than LOG_DEFAULT_DEBUG
+// this will avoid calling expensive_function() if LOG_DEFAULT_DEBUG > gpt_log_verbosity_thold
 //
 #define LOG_TMPL(level, verbosity, ...) \
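
The macro body is cut off by the hunk, but the guard pattern it implements can be sketched as follows; this is an assumption built from the declarations visible above (gpt_log_add from this header, plus a hypothetical gpt_log_main() accessor), not the verbatim definition:

#define LOG_TMPL(level, verbosity, ...)                        \
    do {                                                       \
        if ((verbosity) <= gpt_log_verbosity_thold) {          \
            gpt_log_add(gpt_log_main(), (level), __VA_ARGS__); \
        }                                                      \
    } while (0)

Because the threshold check happens before the call is expanded into executable code paths, expensive_function() in the example above is never evaluated when the check fails.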

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp

@@ -42,7 +42,7 @@
 #define LOG_INF(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
 #define LOG_WRN(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
 #define LOG_ERR(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
-#define LOG_DBG(...) do { fprintf(stdout, __VA_ARGS__); } while (0)
+#define LOG_DBG(...) do { fprintf(stderr, __VA_ARGS__); } while (0)
 
 //#define CLIP_DEBUG_FUNCTIONS

diff --git a/examples/server/CMakeLists.txt b/examples/server/CMakeLists.txt

@@ -1,6 +1,6 @@
 set(TARGET llama-server)
-option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
-option(LLAMA_SERVER_SSL     "Build SSL support for the server" OFF)
+option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
 
 include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
@@ -45,9 +45,6 @@ endforeach()
 add_executable(${TARGET} ${TARGET_SRCS})
 install(TARGETS ${TARGET} RUNTIME)
 
-target_compile_definitions(${TARGET} PRIVATE
-    SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
-)
-
 target_link_libraries(${TARGET} PRIVATE common ${CMAKE_THREAD_LIBS_INIT})

diff --git a/examples/server/server.cpp b/examples/server/server.cpp

@@ -2077,13 +2077,6 @@ struct server_context {
                 slot_npast++;
             }
 
-            //LOG_VERBOSE("prompt processing progress", {
-            //    {"id_slot",  slot.id},
-            //    {"n_past",   slot.n_past},
-            //    {"n_ctx",    n_ctx},
-            //    {"n_tokens", batch.n_tokens},
-            //    {"progress", (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens},
-            //});
             SLT_INF(slot, "prompt processing progress, n_past = %d, n_tokens = %d, progress = %f\n", slot.n_past, batch.n_tokens, (float) slot.n_prompt_tokens_processed / slot.n_prompt_tokens);
 
             // entire prompt has been processed
@@ -2098,12 +2091,6 @@ struct server_context {
                 slot.n_decoded = 0;
                 slot.i_batch   = batch.n_tokens - 1;
 
-                //LOG_VERBOSE("prompt done", {
-                //    {"id_slot",  slot.id},
-                //    {"n_past",   slot.n_past},
-                //    {"n_ctx",    n_ctx},
-                //    {"n_tokens", batch.n_tokens},
-                //});
                 SLT_INF(slot, "prompt done, n_past = %d, n_tokens = %d\n", slot.n_past, batch.n_tokens);
             }
         }
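
The SLT_INF calls that replace the commented-out LOG_VERBOSE blocks are slot-scoped helpers defined elsewhere in the server sources. A hypothetical sketch of the shape of such a helper, assuming it forwards to the LOG_INF macro from common/log.h with the slot id as a prefix:

// hypothetical sketch, not the actual server macro
#define SLT_INF(slot, fmt, ...) \
    LOG_INF("slot %d | " fmt, (slot).id, __VA_ARGS__)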