llama : introduce anonymous namespace in llama.cpp

This commit introduces an anonymous namespace in llama.cpp to
encapsulate the following structs and types:

* llama_state
* llama_hparams
* llama_cparams
* llama_layer
* llama_ubatch
* llama_kv_cell
* llama_kv_cache
* llama_control_vector
* e_model

There are potentially more structs, as well as functions that are
currently declared as static, that could be moved into this anonymous
namespace in the future.
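
For reference, a minimal sketch of the mechanism (illustrative only, not
code from this commit; the example_* names are made up): everything
declared inside an anonymous namespace gets internal linkage, the same
effect as marking each declaration static, so the names are not visible
to other translation units.

    #include <cstddef>

    namespace {

    // Internal linkage: visible only inside this translation unit,
    // equivalent to declaring it static at file scope.
    const std::size_t example_kiB = 1024;

    struct example_state {
        void * log_callback_user_data = nullptr;
    };

    example_state g_example_state; // no static keyword needed here

    } // anonymous namespace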

The motivation for this change is to avoid polluting the global
namespace with these types.

Refs: https://github.com/ggerganov/llama.cpp/pull/9557
Author: Daniel Bevenius
Date:   2024-09-23 08:44:49 +02:00
Parent: e62e9789cd
Commit: bfb1058d74


@@ -2247,6 +2247,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer
     GGML_UNUSED(host_buffer);
 }
 
+namespace {
+
 //
 // globals
 //
@@ -2267,7 +2269,7 @@ struct llama_state {
     void * log_callback_user_data = nullptr;
 };
 
-static llama_state g_state;
+llama_state g_state;
 
 // available llama models
 enum e_model {
@@ -2333,9 +2335,9 @@ enum e_model {
     MODEL_27B,
 };
 
-static const size_t kiB = 1024;
-static const size_t MiB = 1024*kiB;
-static const size_t GiB = 1024*MiB;
+const size_t kiB = 1024;
+const size_t MiB = 1024*kiB;
+const size_t GiB = 1024*MiB;
 
 struct llama_hparams {
     bool vocab_only;
@@ -2839,6 +2841,8 @@ struct llama_control_vector {
     }
 };
 
+}
+
 struct llama_model {
     e_model type = MODEL_UNKNOWN;
     llm_arch arch = LLM_ARCH_UNKNOWN;