Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-11 13:30:35 +00:00)
ggml : add numa options (#5377)
* Added numa options to allow finer grained control as well as plumbing for a new mirror mode that will require numa.h
* Reverted Makefile
* Fixed include
* Removed sched.h from ggml.h, moved ggml_get_numa_affinity into ggml.c, removed trailing whitespace and fixed up a few inconsistent variables
* removed trailing whitespace
* Added numa options to allow finer grained control as well as plumbing for a new mirror mode that will require numa.h
* Reverting Makefile
* Fixed a number of issues with the move from BOOL to ggml_numa_strategies. Added a note about mirror mode not being implemented yet
* Removing MIRROR_MODE code for this PR
* Removing last bit of MIRROR_MODE code for this PR
* Removing unneeded branch in server.cpp example and moving get_numa_affinity and making it static
* Fixed lingering init_llama_backend() bool calls in tests and examples
* Removed enum llama_numa_strategies
* Revert bad merge with dynatemp flags
* add missing enum ggml_numa_strategies declaration and revert sync problem with master
* add missing enum ggml_numa_strategies declaration
* fixed ggml_init_numa variable
* Update ggml.h (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* Update READMEs with info about numa flags, change INTERLEAVE strategy name to DISTRIBUTE everywhere, implement the improved distribution strategy from @rankaiyx, fix a spelling mistake and un-merge some bad merges
* split numa init out from llama_backend_init and created llama_numa_init. Updated all code paths and samples
* Fix up some boolean vs enum comparisons
* Added #ifdefs for non-Linux OS that don't have cpu_set_t datatype
* Update ggml.h: align enum values (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
* Update ggml.c: remove whitespace (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
* Update ggml.c: align parameters (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
* Update examples/server/server.cpp: remove whitespace and align brace (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
* Update common/common.cpp: remove whitespace and align brace (Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>)
* unified ggml_numa_strategy enum and fixed text alignment in server.cpp example
* Update ggml.c: simplified return for platforms without NUMA support (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* removed redundant else from cli argument processing of --numa
* whitespace

---------

Co-authored-by: root <root@nenya.lothlorien.ca>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
Commit f486f6e1e5 (parent 60ed04cf82)
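For downstream users of the library, the visible API change in this commit is that backend initialization is split in two: llama_backend_init() no longer takes a bool, and NUMA setup moves into the new llama_numa_init(). A minimal caller-side sketch (assumes the llama.h from this commit is on the include path; model loading and inference are omitted):

    #include "llama.h"

    int main(void) {
        // before this commit: llama_backend_init(params.numa);  // numa was a bool
        llama_backend_init();                             // backend init no longer touches NUMA
        llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);   // pick a strategy; DISABLED is a no-op
        // ... load a model and run inference here ...
        llama_backend_free();
        return 0;
    }

Passing GGML_NUMA_STRATEGY_DISABLED, or skipping the llama_numa_init() call entirely, keeps the old non-NUMA behaviour.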
@@ -671,7 +671,15 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--numa") {
-            params.numa = true;
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::string value(argv[i]);
+            /**/ if (value == "distribute" || value == "") { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
+            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
+            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
+            else { invalid_param = true; break; }
         } else if (arg == "--verbose-prompt") {
             params.verbose_prompt = true;
         } else if (arg == "--no-display-prompt") {
@@ -935,7 +943,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" -tb N, --threads-batch N\n");
     printf(" number of threads to use during batch and prompt processing (default: same as --threads)\n");
     printf(" -td N, --threads-draft N");
-    printf(" number of threads to use during generation (default: same as --threads)");
+    printf(" number of threads to use during generation (default: same as --threads)\n");
     printf(" -tbd N, --threads-batch-draft N\n");
     printf(" number of threads to use during batch and prompt processing (default: same as --threads-draft)\n");
     printf(" -p PROMPT, --prompt PROMPT\n");
@@ -1005,7 +1013,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" --winogrande-tasks N number of tasks to use when computing the Winogrande score (default: %zu)\n", params.winogrande_tasks);
     printf(" --multiple-choice compute multiple choice score over random tasks from datafile supplied with -f\n");
     printf(" --multiple-choice-tasks N number of tasks to use when computing the multiple choice score (default: %zu)\n", params.winogrande_tasks);
-    printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base");
+    printf(" --kl-divergence computes KL-divergence to logits provided via --kl-divergence-base\n");
     printf(" --keep N number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
     printf(" --draft N number of tokens to draft for speculative decoding (default: %d)\n", params.n_draft);
     printf(" --chunks N max number of chunks to process (default: %d, -1 = all)\n", params.n_chunks);
@@ -1022,7 +1030,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     if (llama_supports_mmap()) {
         printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    printf(" --numa attempt optimizations that help on some NUMA systems\n");
+    printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
+    printf(" - distribute: spread execution evenly over all nodes\n");
+    printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
+    printf(" - numactl: use the CPU map provided by numactl\n");
     printf(" if run without this previously, it is recommended to drop the system page cache before using this\n");
     printf(" see https://github.com/ggerganov/llama.cpp/issues/1437\n");
     if (llama_supports_gpu_offload()) {
@@ -1689,7 +1700,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
     fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false");
     fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
-    fprintf(stream, "numa: %s # default: false\n", params.numa ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
     fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
     fprintf(stream, "presence_penalty: %f # default: 0.0\n", sparams.penalty_present);
@@ -76,6 +76,7 @@ struct gpt_params {
     float yarn_beta_slow = 1.0f; // YaRN high correction dim
     int32_t yarn_orig_ctx = 0; // YaRN original context length
     int32_t rope_scaling_type = LLAMA_ROPE_SCALING_UNSPECIFIED;
+    ggml_numa_strategy numa = GGML_NUMA_STRATEGY_DISABLED;
 
     // // sampling parameters
     struct llama_sampling_params sparams;
@@ -134,7 +135,6 @@ struct gpt_params {
     bool logits_all = false; // return logits for all tokens in the batch
     bool use_mmap = true; // use mmap for faster loads
     bool use_mlock = false; // use mlock to keep model in memory
-    bool numa = false; // attempt optimizations that help on some NUMA systems
     bool verbose_prompt = false; // print prompt tokens before generation
     bool display_prompt = true; // print prompt before generation
     bool infill = false; // use infill mode
@@ -82,7 +82,8 @@ int main(int argc, char ** argv) {
 
     // init LLM
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     // initialize the model
 
@@ -17,7 +17,7 @@ let n_parallel: Int = arguments.count > 3 && Int(arguments[3]) != nil ? Int(argu
 let n_len: Int = 32
 
 // init LLM
-llama_backend_init(false)
+llama_backend_init()
 defer {
     llama_backend_free()
 }
@@ -50,7 +50,8 @@ int main(int argc, char ** argv) {
 
     // init LLM
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     // initialize the model
 
@@ -119,7 +119,8 @@ int main(int argc, char ** argv)
     // Init LLM :
     //---------------------------------
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -74,7 +74,8 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -568,7 +568,8 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model_params mparams = llama_model_params_from_gpt_params(params);
 
@@ -202,7 +202,8 @@ int main(int argc, char ** argv) {
     std::mt19937 rng(params.seed);
 
     LOG("%s: llama backend init\n", __func__);
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -1151,8 +1151,7 @@ int main(int argc, char ** argv) {
     if (!params.verbose) {
         llama_log_set(llama_null_log_callback, NULL);
     }
-    bool numa = false;
-    llama_backend_init(numa);
+    llama_backend_init();
 
     // initialize printer
     std::unique_ptr<printer> p;
@@ -274,8 +274,8 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint emb
 
 extern "C"
 JNIEXPORT void JNICALL
-Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject, jboolean numa) {
-    llama_backend_init(numa);
+Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) {
+    llama_backend_init();
 }
 
 extern "C"
@@ -51,7 +51,7 @@ actor LlamaContext {
     }
 
     static func create_context(path: String) throws -> LlamaContext {
-        llama_backend_init(false)
+        llama_backend_init()
         var model_params = llama_model_default_params()
 
 #if targetEnvironment(simulator)
@@ -218,7 +218,8 @@ static struct llava_context * llava_init(gpt_params * params) {
 
     auto ctx_clip = clip_model_load(clip_path, /*verbosity=*/ 1);
 
-    llama_backend_init(params->numa);
+    llama_backend_init();
+    llama_numa_init(params->numa);
 
     llama_model_params model_params = llama_model_params_from_gpt_params(*params);
 
@@ -54,7 +54,8 @@ int main(int argc, char ** argv) {
 #endif // LOG_DISABLE_LOGS
 
     // init llama.cpp
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model = NULL;
     llama_context * ctx = NULL;
@@ -31,7 +31,8 @@ int main(int argc, char ** argv){
 #endif // LOG_DISABLE_LOGS
 
     // init llama.cpp
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model = NULL;
     llama_context * ctx = NULL;
@@ -283,7 +283,11 @@ These options help improve the performance and memory usage of the LLaMA models.
 
 ### NUMA support
 
-- `--numa`: Attempt optimizations that help on some systems with non-uniform memory access. This currently consists of pinning an equal proportion of the threads to the cores on each NUMA node, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
+- `--numa distribute`: Pin an equal proportion of the threads to the cores on each NUMA node. This will spread the load amongst all cores on the system, utilizing all memory channels at the expense of potentially requiring memory to travel over the slow links between nodes.
+- `--numa isolate`: Pin all threads to the NUMA node that the program starts on. This limits the number of cores and amount of memory that can be used, but guarantees all memory access remains local to the NUMA node.
+- `--numa numactl`: Pin threads to the CPUMAP that is passed to the program by starting it with the numactl utility. This is the most flexible mode, and allows arbitrary core usage patterns, for example a map that uses all the cores on one NUMA node, and just enough cores on a second node to saturate the inter-node memory bus.
+
+These flags attempt optimizations that help on some systems with non-uniform memory access. This currently consists of one of the above strategies, and disabling prefetch and readahead for mmap. The latter causes mapped pages to be faulted in on first access instead of all at once, and in combination with pinning threads to NUMA nodes, more of the pages end up on the NUMA node where they are used. Note that if the model is already in the system page cache, for example because of a previous run without this option, this will have little effect unless you drop the page cache first. This can be done by rebooting the system or on Linux by writing '3' to '/proc/sys/vm/drop_caches' as root.
 
 ### Memory Float 32
 
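The node-selection behind these three flags is implemented in the ggml.c hunk further down. As a condensed, runnable illustration only (the enum is copied from the ggml.h hunk in this commit for self-containment; pick_node, its parameters, and the hard-coded two-node example are inventions of this sketch, not llama.cpp code):

    #include <stdio.h>

    /* Copied from the ggml.h hunk in this commit, for self-containment. */
    enum ggml_numa_strategy {
        GGML_NUMA_STRATEGY_DISABLED   = 0,
        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
        GGML_NUMA_STRATEGY_ISOLATE    = 2,
        GGML_NUMA_STRATEGY_NUMACTL    = 3,
    };

    /* Which NUMA node does worker thread `thread_n` get pinned to?
     * Returns -1 when the strategy does not pin per node (numactl keeps the
     * cpuset inherited from the numactl utility; disabled does nothing). */
    static int pick_node(enum ggml_numa_strategy s, int thread_n, int n_nodes, int current_node) {
        switch (s) {
            case GGML_NUMA_STRATEGY_DISTRIBUTE: return thread_n % n_nodes; // spread over all nodes
            case GGML_NUMA_STRATEGY_ISOLATE:    return current_node;       // stay on the starting node
            default:                            return -1;
        }
    }

    int main(void) {
        for (int t = 0; t < 8; ++t) {
            printf("thread %d -> node %d\n", t,
                   pick_node(GGML_NUMA_STRATEGY_DISTRIBUTE, t, /*n_nodes=*/2, /*current_node=*/0));
        }
        return 0;
    }

Compiled and run, this prints threads alternating between node 0 and node 1, which is the round-robin placement that `--numa distribute` describes; the real implementation then pins each thread to the chosen node's CPU set.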
@@ -185,7 +185,8 @@ int main(int argc, char ** argv) {
     }
 
     LOG("%s: llama backend init\n", __func__);
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -122,7 +122,8 @@ int main(int argc, char ** argv) {
 #endif // LOG_DISABLE_LOGS
 
     // init llama.cpp
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model = NULL;
     llama_context * ctx = NULL;
@@ -71,7 +71,8 @@ int main(int argc, char ** argv) {
 
     // init LLM
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     // initialize the model
 
@@ -1809,7 +1809,8 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -237,7 +237,7 @@ int main(int argc, char ** argv) {
         params.imatrix = &imatrix_data;
     }
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     // parse command line arguments
     const std::string fname_inp = argv[arg_idx];
@@ -16,6 +16,13 @@ Command line options:
 - `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
 - `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
 - `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
+- `--numa STRATEGY`: Attempt one of the below optimization strategies that help on some NUMA systems
+- `--numa distribute`: Spread execution evenly over all nodes
+- `--numa isolate`: Only spawn threads on CPUs on the node that execution started on
+- `--numa numactl`: Use the CPU map provided by numactl
+  if run without this previously, it is recommended to drop the system page cache before using this
+  see https://github.com/ggerganov/llama.cpp/issues/1437
+
 - `--numa`: Attempt optimizations that help on some NUMA systems.
 - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
 - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
@@ -1855,7 +1855,10 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     {
         printf(" --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
     }
-    printf(" --numa attempt optimizations that help on some NUMA systems\n");
+    printf(" --numa TYPE attempt optimizations that help on some NUMA systems\n");
+    printf(" - distribute: spread execution evenly over all nodes\n");
+    printf(" - isolate: only spawn threads on CPUs on the node that execution started on\n");
+    printf(" - numactl: use the CPU map provided by numactl\n");
     if (llama_supports_gpu_offload()) {
         printf(" -ngl N, --n-gpu-layers N\n");
         printf(" number of layers to store in VRAM\n");
@@ -2264,9 +2267,17 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
     {
         params.use_mmap = false;
     }
-    else if (arg == "--numa")
-    {
-        params.numa = true;
+    else if (arg == "--numa") {
+        if (++i >= argc) {
+            invalid_param = true;
+            break;
+        } else {
+            std::string value(argv[i]);
+            /**/ if (value == "distribute" || value == "" ) { params.numa = GGML_NUMA_STRATEGY_DISTRIBUTE; }
+            else if (value == "isolate") { params.numa = GGML_NUMA_STRATEGY_ISOLATE; }
+            else if (value == "numactl") { params.numa = GGML_NUMA_STRATEGY_NUMACTL; }
+            else { invalid_param = true; break; }
+        }
     }
     else if (arg == "--embedding")
     {
@@ -2497,7 +2508,8 @@ int main(int argc, char **argv)
         params.model_alias = params.model;
     }
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     LOG_INFO("build info", {{"build", LLAMA_BUILD_NUMBER},
                             {"commit", LLAMA_COMMIT}});
@@ -31,7 +31,8 @@ int main(int argc, char ** argv) {
 
     // init LLM
 
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     // initialize the model
 
@@ -50,7 +50,8 @@ int main(int argc, char ** argv) {
 #endif // LOG_DISABLE_LOGS
 
     // init llama.cpp
-    llama_backend_init(params.numa);
+    llama_backend_init();
+    llama_numa_init(params.numa);
 
     llama_model * model_tgt = NULL;
     llama_model * model_dft = NULL;
@@ -17,7 +17,7 @@ int main(int argc, char ** argv) {
 
     const bool printing_ids = argc > 3 && std::string(argv[3]) == "--ids";
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     llama_model_params model_params = llama_model_default_params();
     model_params.vocab_only = true;
ggml.c (80 changes)
@@ -1954,9 +1954,16 @@ struct ggml_numa_node {
 };
 
 struct ggml_numa_nodes {
+    enum ggml_numa_strategy numa_strategy;
     struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
     uint32_t n_nodes;
     uint32_t total_cpus; // hardware threads on system
+    uint32_t current_node; // node on which main process is executing
+#ifdef __linux__
+    cpu_set_t cpuset; // cpuset from numactl
+#else
+    uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
+#endif
 };
 
 //
@@ -1990,7 +1997,22 @@ inline static void ggml_critical_section_end(void) {
     atomic_fetch_sub(&g_state_barrier, 1);
 }
 
-void ggml_numa_init(void) {
+#ifdef __linux__
+static cpu_set_t ggml_get_numa_affinity(void) {
+    cpu_set_t cpuset;
+    pthread_t thread;
+    thread = pthread_self();
+    CPU_ZERO(&cpuset);
+    pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
+    return cpuset;
+}
+#else
+static uint32_t ggml_get_numa_affinity(void) {
+    return 0; // no NUMA support
+}
+#endif
+
+void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
     if (g_state.numa.n_nodes > 0) {
         fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
 
@@ -2002,6 +2024,13 @@ void ggml_numa_init(void) {
     char path[256];
     int rv;
 
+    // set numa scheme
+    g_state.numa.numa_strategy = numa_flag;
+
+    GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
+
+    g_state.numa.cpuset = ggml_get_numa_affinity();
+
     // enumerate nodes
     while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
         rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
@@ -2020,11 +2049,17 @@ void ggml_numa_init(void) {
 
     GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
 
-    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1) {
+    // figure out which node we're on
+    uint current_cpu;
+    int getcpu_ret = getcpu(&current_cpu, &g_state.numa.current_node);
+
+    if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
         g_state.numa.n_nodes = 0;
         return;
     }
 
+    GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
+
     for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
         struct ggml_numa_node * node = &g_state.numa.nodes[n];
         GGML_PRINT_DEBUG("CPUs on node %u:", n);
@@ -16638,26 +16673,46 @@ typedef pthread_t ggml_thread_t;
 
 // Android's libc implementation "bionic" does not support setting affinity
 #if defined(__linux__) && !defined(__BIONIC__)
-static void set_numa_thread_affinity(int thread_n, int n_threads) {
+static void set_numa_thread_affinity(int thread_n) {
     if (!ggml_is_numa()) {
         return;
     }
 
-    // run thread on node_num thread_n / (threads per node)
-    const int node_num = thread_n / ((n_threads + g_state.numa.n_nodes - 1) / g_state.numa.n_nodes);
-    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
+    int node_num;
+    int rv;
     size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
 
+    switch(g_state.numa.numa_strategy) {
+        case GGML_NUMA_STRATEGY_DISTRIBUTE:
+            // run thread on node_num thread_n / (threads per node)
+            node_num = thread_n % g_state.numa.n_nodes;
+            break;
+        case GGML_NUMA_STRATEGY_ISOLATE:
+            // run thread on current_node
+            node_num = g_state.numa.current_node;
+            break;
+        case GGML_NUMA_STRATEGY_NUMACTL:
+            // use the cpuset that numactl gave us
+            rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
+            if (rv) {
+                fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
+            }
+            return;
+        default:
+            return;
+    }
+
+    struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
+
     cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
     CPU_ZERO_S(setsize, cpus);
     for (size_t i = 0; i < node->n_cpus; ++i) {
         CPU_SET_S(node->cpus[i], setsize, cpus);
     }
 
-    int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
+    rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
     if (rv) {
-        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
-            strerror(rv));
+        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
     }
 
     CPU_FREE(cpus);
@@ -16678,8 +16733,7 @@ static void clear_numa_thread_affinity(void) {
 
     int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
     if (rv) {
-        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",
-            strerror(rv));
+        fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
     }
 
     CPU_FREE(cpus);
@@ -16687,7 +16741,7 @@ static void clear_numa_thread_affinity(void) {
 #else
 // TODO: Windows etc.
 // (the linux implementation may also work on BSD, someone should test)
-static void set_numa_thread_affinity(int thread_n, int n_threads) { UNUSED(thread_n); UNUSED(n_threads); }
+static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
 static void clear_numa_thread_affinity(void) {}
 #endif
 
@@ -16987,7 +17041,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
 
     const int n_threads = state->shared->n_threads;
 
-    set_numa_thread_affinity(state->ith, n_threads);
+    set_numa_thread_affinity(state->ith);
 
     int node_n = -1;
     int task_phase = GGML_TASK_FINALIZE;
ggml.h (12 changes)
@@ -658,6 +658,16 @@ extern "C" {
         void * wdata;
     };
 
+    // numa strategies
+    enum ggml_numa_strategy {
+        GGML_NUMA_STRATEGY_DISABLED   = 0,
+        GGML_NUMA_STRATEGY_DISTRIBUTE = 1,
+        GGML_NUMA_STRATEGY_ISOLATE    = 2,
+        GGML_NUMA_STRATEGY_NUMACTL    = 3,
+        GGML_NUMA_STRATEGY_MIRROR     = 4,
+        GGML_NUMA_STRATEGY_COUNT
+    };
+
     // misc
 
     GGML_API void ggml_time_init(void); // call this once at the beginning of the program
@@ -668,7 +678,7 @@ extern "C" {
 
     GGML_API void ggml_print_backtrace(void);
 
-    GGML_API void ggml_numa_init(void); // call once for better performance on NUMA systems
+    GGML_API void ggml_numa_init(enum ggml_numa_strategy numa); // call once for better performance on NUMA systems
     GGML_API bool ggml_is_numa(void); // true if init detected that system has >1 NUMA node
 
     GGML_API void ggml_print_object (const struct ggml_object * obj);
llama.cpp (12 changes)
@@ -11182,7 +11182,7 @@ bool llama_mlock_supported(void) {
     return llama_supports_mlock();
 }
 
-void llama_backend_init(bool numa) {
+void llama_backend_init(void) {
     ggml_time_init();
 
     // needed to initialize f16 tables
@@ -11192,15 +11192,17 @@ void llama_backend_init(bool numa) {
         ggml_free(ctx);
     }
 
-    if (numa) {
-        ggml_numa_init();
-    }
-
 #ifdef GGML_USE_MPI
     ggml_mpi_backend_init();
 #endif
 }
 
+void llama_numa_init(enum ggml_numa_strategy numa) {
+    if (numa != GGML_NUMA_STRATEGY_DISABLED) {
+        ggml_numa_init(numa);
+    }
+}
+
 void llama_backend_free(void) {
 #ifdef GGML_USE_MPI
     ggml_mpi_backend_free();
llama.h (5 changes)
@@ -312,7 +312,10 @@ extern "C" {
     // Initialize the llama + ggml backend
     // If numa is true, use NUMA optimizations
     // Call once at the start of the program
-    LLAMA_API void llama_backend_init(bool numa);
+    LLAMA_API void llama_backend_init(void);
+
+    //optional:
+    LLAMA_API void llama_numa_init(enum ggml_numa_strategy numa);
 
     // Call once at the end of the program - currently only used for MPI
     LLAMA_API void llama_backend_free(void);
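For code that previously passed a bool into llama_backend_init(), a possible migration sketch (the helper and the mapping of the old true value onto the distribute strategy are assumptions of this sketch, based on the README wording earlier in this diff, not something the header prescribes):

    #include <stdbool.h>
    #include "llama.h"

    // hypothetical helper, not part of the llama.cpp API
    static void init_backend(bool old_numa_flag) {
        llama_backend_init();
        llama_numa_init(old_numa_flag ? GGML_NUMA_STRATEGY_DISTRIBUTE  // old --numa pinning is closest to "distribute"
                                      : GGML_NUMA_STRATEGY_DISABLED);  // leave NUMA handling off
    }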
@@ -12,7 +12,7 @@ int main(int argc, char ** argv) {
     auto * model_path = get_model_or_exit(argc, argv);
 
     std::thread([&model_path]() {
-        llama_backend_init(false);
+        llama_backend_init();
         auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
         auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
         llama_free(ctx);
@@ -14,7 +14,7 @@ int main(int argc, char *argv[] ) {
     fprintf(stderr, "using '%s'\n", model_path);
     fclose(file);
 
-    llama_backend_init(false);
+    llama_backend_init();
     auto params = llama_model_params{};
     params.use_mmap = false;
     params.progress_callback = [](float progress, void * ctx){
@@ -61,7 +61,7 @@ int main(int argc, char **argv) {
     llama_model * model;
     llama_context * ctx;
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     // load the vocab
     {
@@ -60,7 +60,7 @@ int main(int argc, char **argv) {
     llama_model * model;
     llama_context * ctx;
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     // load the vocab
     {
@@ -25,7 +25,7 @@ int main(int argc, char **argv) {
     llama_model * model;
     llama_context * ctx;
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     // load the vocab
     {
@@ -25,7 +25,7 @@ int main(int argc, char **argv) {
     llama_model * model;
     llama_context * ctx;
 
-    llama_backend_init(false);
+    llama_backend_init();
 
     // load the vocab
     {