From f00780b2ee99955fec8c02cd9f90aa230c9e026d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 14 Aug 2023 16:28:44 +0300 Subject: [PATCH] llama : sync gguf-llama.cpp with latest llama.cpp (#2608) * llama : sync gguf-llama.cpp with latest llama.cpp * minor : indentation + assert * llama : refactor gguf_buffer and gguf_ctx_buffer * llama : minor --- examples/gguf/gguf.cpp | 23 +- ggml-metal.h | 3 + ggml-metal.m | 15 + gguf-llama.cpp | 989 ++++++++++++++++++++++++++--------------- gguf-llama.h | 28 +- gguf-util.h | 97 ---- 6 files changed, 692 insertions(+), 463 deletions(-) diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp index 6f454a204..b32367f30 100644 --- a/examples/gguf/gguf.cpp +++ b/examples/gguf/gguf.cpp @@ -8,14 +8,19 @@ #include #include #include -/* + +#undef MIN +#undef MAX +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define MAX(a, b) ((a) > (b) ? (a) : (b)) + template static std::string to_string(const T & val) { std::stringstream ss; ss << val; return ss.str(); } -*/ + void gguf_ex_write_str(std::ofstream & fout, const std::string & val) { const int32_t n = val.size(); fout.write((const char *) &n, sizeof(n)); @@ -377,28 +382,28 @@ bool gguf_ex_read_2(const std::string & fname) { struct gguf_file file(fname.c_str(), "rb"); gguf_mmap data_mmap(&file, 0, false); + const int n_tensors = gguf_get_n_tensors(ctx); for (int i = 0; i < n_tensors; ++i) { - const char * name = gguf_get_tensor_name(ctx, i); - const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i); + const char * name = gguf_get_tensor_name(ctx, i); + const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i); + struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); cur->data = static_cast(data_mmap.addr) + offset; // print first 10 elements - const float * data = (const float *) cur->data; + const float * data = (const float *) cur->data; printf("%s data[:10] : ", name); - - for (int j = 0; j < 10; ++j) { + for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) { printf("%f ", data[j]); } - printf("\n\n"); } -fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data)); + fprintf(stdout, "%s: ctx_data size: %zu\n", __func__, ggml_get_mem_size(ctx_data)); ggml_free(ctx_data); gguf_free(ctx); diff --git a/ggml-metal.h b/ggml-metal.h index 16f1a0caa..1a5d96c33 100644 --- a/ggml-metal.h +++ b/ggml-metal.h @@ -38,6 +38,9 @@ struct ggml_metal_context; struct ggml_metal_context * ggml_metal_init(int n_cb); void ggml_metal_free(struct ggml_metal_context * ctx); +void * ggml_metal_host_malloc(size_t n); +void ggml_metal_host_free (void * data); + // set the number of command buffers to use void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb); diff --git a/ggml-metal.m b/ggml-metal.m index b47a98e21..4e4491414 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -224,6 +224,21 @@ void ggml_metal_free(struct ggml_metal_context * ctx) { free(ctx); } +void * ggml_metal_host_malloc(size_t n) { + void * data = NULL; + const int result = posix_memalign((void **) &data, getpagesize(), n); + if (result != 0) { + fprintf(stderr, "%s: error: posix_memalign failed\n", __func__); + return NULL; + } + + return data; +} + +void ggml_metal_host_free(void * data) { + free(data); +} + void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) { ctx->n_cb = n_cb; } diff --git a/gguf-llama.cpp b/gguf-llama.cpp index 99da3c56d..0f8eb3c90 100644 --- a/gguf-llama.cpp +++ b/gguf-llama.cpp @@ -47,7 +47,6 @@ #include #include #include 
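For reference, a minimal usage sketch of the page-aligned host allocation helpers added to ggml-metal.h / ggml-metal.m above. This is illustrative only and not part of the patch; it assumes "ggml-metal.h" and <stdio.h> are included and that the build has Metal enabled.

    // allocate a page-aligned host buffer via the new helper; it returns NULL
    // if posix_memalign fails, and must be released with the matching helper
    void * data = ggml_metal_host_malloc(16u*1024u*1024u); // 16 MB
    if (data == NULL) {
        fprintf(stderr, "host allocation failed\n");
    } else {
        // ... e.g. use as backing memory registered with ggml_metal_add_buffer ...
        ggml_metal_host_free(data);
    }
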
-#include #include #include #include @@ -56,29 +55,76 @@ #pragma warning(disable: 4244 4267) // possible loss of data #endif +static void llama_log_internal(llama_log_level level, const char* format, ...); +static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data); +#define LLAMA_LOG_INFO(...) llama_log_internal(LLAMA_LOG_LEVEL_INFO , __VA_ARGS__) +#define LLAMA_LOG_WARN(...) llama_log_internal(LLAMA_LOG_LEVEL_WARN , __VA_ARGS__) +#define LLAMA_LOG_ERROR(...) llama_log_internal(LLAMA_LOG_LEVEL_ERROR, __VA_ARGS__) + +template +static std::string to_string(const T & val) { + std::stringstream ss; + ss << val; + return ss.str(); +} + +#if !defined(GGML_USE_CUBLAS) && !defined(GGML_USE_METAL) +#include "ggml-alloc.h" +#define LLAMA_USE_ALLOCATOR +#else #define LLAMA_USE_SCRATCH #define LLAMA_MAX_SCRATCH_BUFFERS 16 - -// available llama models -enum e_model { - MODEL_UNKNOWN, - MODEL_3B, - MODEL_7B, - MODEL_13B, - MODEL_30B, - MODEL_65B, - MODEL_70B, -}; - -static const size_t kB = 1024; -static const size_t MB = 1024*1024; - -// computed for n_ctx == 2048 -// TODO: dynamically determine these sizes -// needs modifications in ggml +#endif typedef void (*offload_func_t)(struct ggml_tensor * tensor); +#ifdef GGML_USE_CUBLAS +#define llama_host_malloc(n) ggml_cuda_host_malloc(n) +#define llama_host_free(data) ggml_cuda_host_free(data) +#elif GGML_USE_METAL +#define llama_host_malloc(n) ggml_metal_host_malloc(n) +#define llama_host_free(data) ggml_metal_host_free(data) +#else +#define llama_host_malloc(n) malloc(n) +#define llama_host_free(data) free(data) +#endif + +struct llama_buffer { + void * data = NULL; + size_t size = 0; + + // fallback to malloc / free + // useful in cases where CUDA can try to allocate PINNED memory + bool fallback = false; + + void resize(size_t n) { + llama_host_free(data); + + data = llama_host_malloc(n); + if (!data) { + fallback = true; + data = malloc(n); + } else { + fallback = false; + } + + GGML_ASSERT(data); + size = n; + } + + ~llama_buffer() { + if (data) { + if (fallback) { // NOLINT + free(data); + } else { + llama_host_free(data); + } + } + + data = NULL; + } +}; + void llama_nop(struct ggml_tensor * tensor) { // don't offload by default (void) tensor; } @@ -102,6 +148,24 @@ static void ggml_graph_compute_helper(std::vector & buf, ggml_cgraph * // memory sizes (calculated for n_batch == 512) // +// computed for n_ctx == 2048 +// TODO: dynamically determine these sizes +// needs modifications in ggml + +// available llama models +enum e_model { + MODEL_UNKNOWN, + MODEL_3B, + MODEL_7B, + MODEL_13B, + MODEL_30B, + MODEL_65B, + MODEL_70B, +}; + +static const size_t kB = 1024; +static const size_t MB = 1024*1024; + static const std::map & MEM_REQ_SCRATCH0(int n_ctx) { static std::map k_sizes = { @@ -143,7 +207,7 @@ static const std::map & MEM_REQ_EVAL() } // amount of VRAM needed per batch size to hold temporary results -// the values for 3b and 65b are not derived from testing but instead chosen conservatively +// the values for 3b are not derived from testing but instead chosen conservatively static const std::map & VRAM_REQ_SCRATCH_BASE() { static std::map k_sizes = { @@ -151,14 +215,14 @@ static const std::map & VRAM_REQ_SCRATCH_BASE() { MODEL_7B, 512ull * kB }, { MODEL_13B, 640ull * kB }, { MODEL_30B, 768ull * kB }, - { MODEL_65B, 1536ull * kB }, - { MODEL_70B, 1536ull * kB }, // TODO (likely can be reduced) + { MODEL_65B, 1280ull * kB }, + { MODEL_70B, 1280ull * kB }, }; return k_sizes; } // amount of VRAM needed 
per batch size and context to hold temporary results -// the values for 3b and 65b are not derived from testing but instead chosen conservatively +// the values for 3b are not derived from testing but instead chosen conservatively static const std::map & VRAM_REQ_SCRATCH_PER_CONTEXT() { static std::map k_sizes = { @@ -166,8 +230,8 @@ static const std::map & VRAM_REQ_SCRATCH_PER_CONTEXT() { MODEL_7B, 128ull }, { MODEL_13B, 160ull }, { MODEL_30B, 208ull }, - { MODEL_65B, 416ull }, - { MODEL_70B, 416ull }, // TODO (likely can be reduced) + { MODEL_65B, 256ull }, + { MODEL_70B, 256ull }, }; return k_sizes; } @@ -175,15 +239,15 @@ static const std::map & VRAM_REQ_SCRATCH_PER_CONTEXT() // default hparams (LLaMA 7B) struct llama_hparams { uint32_t n_vocab = 32000; - uint32_t n_ctx = 512; // this is provided as user input? + uint32_t n_ctx = 512; uint32_t n_embd = 4096; uint32_t n_head = 32; uint32_t n_head_kv = 32; uint32_t n_layer = 32; uint32_t n_rot = 64; - uint32_t n_ff = 11008; + uint32_t n_ff = 11008; - float f_rms_norm_eps = LLAMA_DEFAULT_RMS_EPS; + float f_rms_norm_eps = 1e-5; float rope_freq_base = 10000.0f; float rope_freq_scale = 1.0f; @@ -241,7 +305,7 @@ struct llama_kv_cache { struct ggml_context * ctx = NULL; - gguf_ctx_buffer buf; + llama_buffer buf; int n; // number of tokens currently in the cache @@ -292,7 +356,7 @@ struct llama_model { struct ggml_context * ctx = NULL; // the model memory buffer - gguf_ctx_buffer buf; + llama_buffer buf; // model memory mapped file std::unique_ptr mapping; @@ -329,13 +393,22 @@ struct llama_model { struct llama_context { llama_context(const llama_model & model) : model(model), t_load_us(model.t_load_us), t_start_us(model.t_start_us) {} -#ifdef GGML_USE_METAL ~llama_context() { + if (model_owner) { + delete &model; + } +#ifdef GGML_USE_METAL if (ctx_metal) { ggml_metal_free(ctx_metal); } - } #endif +#ifdef LLAMA_USE_ALLOCATOR + if (alloc) { + ggml_allocr_free(alloc); + } +#endif + } + std::mt19937 rng; bool has_evaluated_once = false; @@ -372,8 +445,19 @@ struct llama_context { // memory buffers used to evaluate the model // TODO: move in llama_state - gguf_ctx_buffer buf_compute; - gguf_ctx_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS]; + llama_buffer buf_compute; + +#ifdef LLAMA_USE_ALLOCATOR + llama_buffer buf_alloc; + ggml_allocr * alloc = NULL; +#endif + +#ifdef LLAMA_USE_SCRATCH + llama_buffer buf_scratch[LLAMA_MAX_SCRATCH_BUFFERS]; + + int buf_last = 0; + size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 }; +#endif #ifdef GGML_USE_METAL ggml_metal_context * ctx_metal = NULL; @@ -383,10 +467,7 @@ struct llama_context { ggml_mpi_context * ctx_mpi = NULL; #endif - int buf_last = 0; - size_t buf_max_size[LLAMA_MAX_SCRATCH_BUFFERS] = { 0 }; - - void use_buf(struct ggml_context * ctx, int i) { + static void use_buf(struct ggml_context * ctx, int i) { #if defined(LLAMA_USE_SCRATCH) size_t last_size = 0; @@ -394,7 +475,7 @@ struct llama_context { last_size = ggml_set_scratch(ctx, { 0, 0, nullptr, }); } else { auto & buf = buf_scratch[i]; - last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.addr, }); + last_size = ggml_set_scratch(ctx, { 0, buf.size, buf.data, }); } if (buf_last >= 0) { @@ -408,7 +489,7 @@ struct llama_context { #endif } - size_t get_buf_max_mem(int i) const { + static size_t get_buf_max_mem(int i) { #if defined(LLAMA_USE_SCRATCH) return buf_max_size[i]; #else @@ -418,6 +499,14 @@ struct llama_context { } }; +struct llama_state { + // We save the log callback globally + llama_log_callback log_callback = 
llama_log_callback_default; + void * log_callback_user_data = nullptr; +}; +// global state +static llama_state g_state; + template static T checked_mul(T a, T b) { T ret = a * b; @@ -470,17 +559,16 @@ struct gguf_load_tensors_map { enum gguf_file_version { GGUF_FILE_VERSION_V1 = 1, - }; - struct gguf_file_loader { gguf_file file; gguf_context * gguf_ctx; gguf_file_version file_version; llama_hparams hparams; llama_vocab vocab; -struct ggml_context * ctx_data = NULL; + + struct ggml_context * ctx_data = NULL; gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map) : file(fname, "rb") { @@ -499,7 +587,7 @@ struct ggml_context * ctx_data = NULL; read_tensor_metadata(tensors_map); } - uint32_t read_u32(const char * key) { + uint32_t read_u32(const char * key) const { int i = gguf_find_key(gguf_ctx, key); if (i == -1) { throw std::runtime_error(format("cannot find param with key %s\n", key)); @@ -508,7 +596,7 @@ struct ggml_context * ctx_data = NULL; return gguf_get_val_u32(gguf_ctx, i); } - float read_f32(const char * key) { + float read_f32(const char * key) const { int i = gguf_find_key(gguf_ctx, key); if (i == -1) { throw std::runtime_error(format("cannot find param with key %s\n", key)); @@ -517,27 +605,26 @@ struct ggml_context * ctx_data = NULL; return gguf_get_val_f32(gguf_ctx, i); } - int read_n_vocab() { + int read_n_vocab() const { int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); - if (i == -1) { - throw std::runtime_error("cannot find token list in GGUF file\n"); - } + if (i == -1) { + throw std::runtime_error("cannot find token list in GGUF file\n"); + } - return gguf_get_arr_n(gguf_ctx, i); + return gguf_get_arr_n(gguf_ctx, i); } void read_hparams() { - // TODO define keys as constants in header // TODO: read all hparams from file - hparams.n_vocab = read_n_vocab(); - hparams.n_ctx = read_u32("llama.context_length"); - hparams.n_embd = read_u32("llama.embedding_length"); - hparams.n_ff = read_u32("llama.feed_forward_length"); - hparams.n_head = read_u32("llama.attention.head_count"); - hparams.n_layer = read_u32("llama.layer_count"); - hparams.n_rot = read_u32("llama.rope.dimension_count"); + hparams.n_vocab = read_n_vocab(); + hparams.n_ctx = read_u32("llama.context_length"); + hparams.n_embd = read_u32("llama.embedding_length"); + hparams.n_ff = read_u32("llama.feed_forward_length"); + hparams.n_head = read_u32("llama.attention.head_count"); + hparams.n_layer = read_u32("llama.layer_count"); + hparams.n_rot = read_u32("llama.rope.dimension_count"); hparams.f_rms_norm_eps = read_f32("llama.attention.layer_norm_rms_epsilon"); // LLaMAv2 @@ -568,7 +655,7 @@ struct ggml_context * ctx_data = NULL; } } - void read_tensor_metadata(gguf_load_tensors_map & tensors_map) { + void read_tensor_metadata(gguf_load_tensors_map & tensors_map) const { const int n_tensors = gguf_get_n_tensors(gguf_ctx); for (int i = 0; i < n_tensors; ++i) { @@ -576,16 +663,19 @@ struct ggml_context * ctx_data = NULL; const char * name = gguf_get_tensor_name(gguf_ctx, i); struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); - uint32_t n_dims = cur->n_dims; + + const uint32_t n_dims = cur->n_dims; tensor.type = cur->type; tensor.ne.resize(n_dims); + for (uint32_t j = 0; j < n_dims; ++j) { - tensor.ne[j] = cur->ne[j]; + tensor.ne[j] = cur->ne[j]; } if (n_dims < 1 || n_dims > 2) { throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims)); } + switch (tensor.type) { case GGML_TYPE_F32: case GGML_TYPE_F16: @@ -605,7 +695,6 @@ struct 
ggml_context * ctx_data = NULL; } } - tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i); tensor.name = name; @@ -632,46 +721,46 @@ struct gguf_file_saver { gguf_file_saver(const char * fname, gguf_file_loader * fl, enum llama_ftype new_ftype) : file(fname, "wb"), fl(fl) { - fprintf(stderr, "llama.cpp: saving model to %s\n", fname); - write_header(); - write_hparams(new_ftype); - } + fprintf(stderr, "llama.cpp: saving model to %s\n", fname); + write_header(); + write_hparams(new_ftype); + } void write_header() { const int32_t magic = GGUF_MAGIC; file.write_i32(magic); - const int32_t version = GGUF_VERSION; - file.write_i32(version); + const int32_t version = GGUF_VERSION; + file.write_i32(version); - const int32_t n_tensors = gguf_get_n_tensors(fl->gguf_ctx); - file.write_i32(n_tensors); + const int32_t n_tensors = gguf_get_n_tensors(fl->gguf_ctx); + file.write_i32(n_tensors); - const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); - file.write_i32(n_kv); + const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); + file.write_i32(n_kv); + } + + void write_hparam_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { + std::vector data(n_arr); + + for (int j = 0; j < n_arr; ++j) { + std::string val = gguf_get_arr_str(fl->gguf_ctx, i, j); + data[j] = val; } - void write_hparam_arr_str(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); + file.write_arr(key, type, data); + } - for (int j = 0; j < n_arr; ++j) { - std::string val = gguf_get_arr_str(fl->gguf_ctx, i, j); - data[j] = val; - } + void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { + std::vector data(n_arr); - file.write_arr(key, type, data); + for (int j = 0; j < n_arr; ++j) { + float val = gguf_get_arr_f32(fl->gguf_ctx, i, j); + data[j] = val; } - void write_hparam_arr_f32(const std::string & key, enum gguf_type type, int i, int n_arr) { - std::vector data(n_arr); - - for (int j = 0; j < n_arr; ++j) { - float val = gguf_get_arr_f32(fl->gguf_ctx, i, j); - data[j] = val; - } - - file.write_arr(key, type, data); - } + file.write_arr(key, type, data); + } void write_hparams(enum llama_ftype new_ftype) { const int32_t n_kv = gguf_get_n_kv(fl->gguf_ctx); @@ -696,59 +785,62 @@ struct gguf_file_saver { switch(vtype) { case GGUF_TYPE_BOOL: - bool_val = gguf_get_val_bool(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_BOOL, bool_val); - break; + bool_val = gguf_get_val_bool(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_BOOL, bool_val); + break; case GGUF_TYPE_FLOAT32: - f32_val = gguf_get_val_f32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_FLOAT32, f32_val); - break; + f32_val = gguf_get_val_f32(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_FLOAT32, f32_val); + break; case GGUF_TYPE_INT16: - i16_val = gguf_get_val_i16(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT16, i16_val); - break; + i16_val = gguf_get_val_i16(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_INT16, i16_val); + break; case GGUF_TYPE_INT32: - i32_val = gguf_get_val_i32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT32, i32_val); - break; + i32_val = gguf_get_val_i32(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_INT32, i32_val); + break; case GGUF_TYPE_INT8: - i8_val = gguf_get_val_i8(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_INT8, i8_val); - break; + i8_val = gguf_get_val_i8(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_INT8, i8_val); + break; case GGUF_TYPE_STRING: - str_val = 
gguf_get_val_str(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_STRING, str_val); - break; + str_val = gguf_get_val_str(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_STRING, str_val); + break; case GGUF_TYPE_UINT16: - u16_val = gguf_get_val_u16(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT16, u16_val); - break; + u16_val = gguf_get_val_u16(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_UINT16, u16_val); + break; case GGUF_TYPE_UINT32: - u32_val = gguf_get_val_u32(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT32, u32_val); - break; + u32_val = gguf_get_val_u32(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_UINT32, u32_val); + break; case GGUF_TYPE_UINT8: - u8_val = gguf_get_val_u8(fl->gguf_ctx, i); - file.write_val(key, GGUF_TYPE_UINT8, u8_val); - break; + u8_val = gguf_get_val_u8(fl->gguf_ctx, i); + file.write_val(key, GGUF_TYPE_UINT8, u8_val); + break; case GGUF_TYPE_ARRAY: - arr_type = gguf_get_arr_type(fl->gguf_ctx, i); - n_arr = gguf_get_arr_n(fl->gguf_ctx, i); - if (arr_type == GGUF_TYPE_FLOAT32) { - write_hparam_arr_f32(key, arr_type, i, n_arr); + arr_type = gguf_get_arr_type(fl->gguf_ctx, i); + n_arr = gguf_get_arr_n(fl->gguf_ctx, i); + if (arr_type == GGUF_TYPE_FLOAT32) { + write_hparam_arr_f32(key, arr_type, i, n_arr); } else if (arr_type == GGUF_TYPE_STRING) { write_hparam_arr_str(key, GGUF_TYPE_STRING, i, n_arr); } else { throw std::runtime_error("not implemented"); } - break; + break; default: - throw std::runtime_error(format("cannot recognize value type for key %s\n", key)); + throw std::runtime_error(format("cannot recognize value type for key %s\n", key)); } } } - info_offset = file.tell(); + info_offset = file.tell(); + + GGML_ASSERT(gguf_get_data_offset(fl->gguf_ctx) >= info_offset); + size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset; file.write_zeros(count); file.seek(info_offset, SEEK_SET); @@ -983,7 +1075,7 @@ static bool kv_cache_init( struct ggml_init_params params; params.mem_size = cache.buf.size; - params.mem_buffer = cache.buf.addr; + params.mem_buffer = cache.buf.data; params.no_alloc = false; cache.ctx = ggml_init(params); @@ -1016,8 +1108,6 @@ struct llama_context_params llama_context_default_params() { /*.seed =*/ LLAMA_DEFAULT_SEED, /*.n_ctx =*/ 512, /*.n_batch =*/ 512, - /*.n_gqa =*/ 1, - /*.rms_norm_eps =*/ LLAMA_DEFAULT_RMS_EPS, /*.gpu_layers =*/ 0, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, @@ -1026,6 +1116,7 @@ struct llama_context_params llama_context_default_params() { /*.progress_callback =*/ nullptr, /*.progress_callback_user_data =*/ nullptr, /*.low_vram =*/ false, + /*.mul_mat_q =*/ false, /*.f16_kv =*/ true, /*.logits_all =*/ false, /*.vocab_only =*/ false, @@ -1144,11 +1235,10 @@ static void llama_model_load_internal( llama_vocab & vocab, int n_ctx, int n_batch, - int n_gqa, - float rms_norm_eps, int n_gpu_layers, int main_gpu, const float * tensor_split, + const bool mul_mat_q, float rope_freq_base, float rope_freq_scale, bool low_vram, @@ -1158,8 +1248,6 @@ static void llama_model_load_internal( bool vocab_only, llama_progress_callback progress_callback, void * progress_callback_user_data) { - GGML_UNUSED(rms_norm_eps); // TODO: update function signature to remove this - model.t_start_us = ggml_time_us(); std::unique_ptr ml(new llama_model_loader(fname, use_mmap)); @@ -1189,11 +1277,15 @@ static void llama_model_load_internal( hparams.n_ctx = n_ctx; // LLaMAv2 - hparams.n_head_kv = hparams.n_head / n_gqa; - if (model.type == e_model::MODEL_65B && n_gqa == 8) { - fprintf(stderr, "%s: warning: 
assuming 70B model based on GQA == %d\n", __func__, n_gqa); - model.type = e_model::MODEL_70B; + // TODO: probably not needed + { + const auto n_gqa = hparams.n_gqa(); + + if (model.type == e_model::MODEL_65B && n_gqa == 8) { + fprintf(stderr, "%s: warning: assuming 70B model based on GQA == %d\n", __func__, n_gqa); + model.type = e_model::MODEL_70B; } + } hparams.rope_freq_base = rope_freq_base; hparams.rope_freq_scale = rope_freq_scale; @@ -1202,27 +1294,21 @@ static void llama_model_load_internal( const uint32_t n_ff = hparams.n_ff; { - fprintf(stderr, "%s: format = %s\n", __func__, gguf_file_version_name(file_version)); - fprintf(stderr, "%s: n_vocab = %u\n", __func__, hparams.n_vocab); - fprintf(stderr, "%s: n_ctx = %u\n", __func__, hparams.n_ctx); - fprintf(stderr, "%s: n_embd = %u\n", __func__, hparams.n_embd); - fprintf(stderr, "%s: n_head = %u\n", __func__, hparams.n_head); - fprintf(stderr, "%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); - fprintf(stderr, "%s: n_layer = %u\n", __func__, hparams.n_layer); - fprintf(stderr, "%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. n_embd_head, n_head_dim - fprintf(stderr, "%s: n_gqa = %u\n", __func__, hparams.n_gqa()); - fprintf(stderr, "%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); - fprintf(stderr, "%s: n_ff = %u\n", __func__, n_ff); - fprintf(stderr, "%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); - fprintf(stderr, "%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); - fprintf(stderr, "%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); - fprintf(stderr, "%s: model size = %s\n", __func__, llama_model_type_name(model.type)); - } - - if (hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_0 || - hparams.ftype == LLAMA_FTYPE_MOSTLY_Q4_1 || - hparams.ftype == LLAMA_FTYPE_MOSTLY_Q8_0) { - throw std::runtime_error(format("this format is no longer supported (see https://github.com/ggerganov/llama.cpp/pull/1508)")); + LLAMA_LOG_INFO("%s: format = %s\n", __func__, gguf_file_version_name(file_version)); + LLAMA_LOG_INFO("%s: n_vocab = %u\n", __func__, hparams.n_vocab); + LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, hparams.n_ctx); + LLAMA_LOG_INFO("%s: n_embd = %u\n", __func__, hparams.n_embd); + LLAMA_LOG_INFO("%s: n_head = %u\n", __func__, hparams.n_head); + LLAMA_LOG_INFO("%s: n_head_kv = %u\n", __func__, hparams.n_head_kv); + LLAMA_LOG_INFO("%s: n_layer = %u\n", __func__, hparams.n_layer); + LLAMA_LOG_INFO("%s: n_rot = %u\n", __func__, hparams.n_rot); // a.k.a. 
n_embd_head, n_head_dim + LLAMA_LOG_INFO("%s: n_gqa = %u\n", __func__, hparams.n_gqa()); + LLAMA_LOG_INFO("%s: rnorm_eps = %.1e\n", __func__, hparams.f_rms_norm_eps); + LLAMA_LOG_INFO("%s: n_ff = %u\n", __func__, n_ff); + LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, hparams.rope_freq_base); + LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, hparams.rope_freq_scale); + LLAMA_LOG_INFO("%s: ftype = %u (%s)\n", __func__, hparams.ftype, llama_ftype_name(hparams.ftype)); + LLAMA_LOG_INFO("%s: model size = %s\n", __func__, llama_model_type_name(model.type)); } if (vocab_only) { @@ -1234,19 +1320,19 @@ static void llama_model_load_internal( size_t ctx_size; size_t mmapped_size; ml->calc_sizes(&ctx_size, &mmapped_size); - fprintf(stderr, "%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); + LLAMA_LOG_INFO("%s: ggml ctx size = %7.2f MB\n", __func__, ctx_size/1024.0/1024.0); // create the ggml context { model.buf.resize(ctx_size); if (use_mlock) { - model.mlock_buf.init (model.buf.addr); + model.mlock_buf.init (model.buf.data); model.mlock_buf.grow_to(model.buf.size); } struct ggml_init_params params = { /*.mem_size =*/ model.buf.size, - /*.mem_buffer =*/ model.buf.addr, + /*.mem_buffer =*/ model.buf.data, /*.no_alloc =*/ ml->use_mmap, }; @@ -1257,13 +1343,15 @@ static void llama_model_load_internal( } (void) main_gpu; + (void) mul_mat_q; #if defined(GGML_USE_CUBLAS) - fprintf(stderr, "%s: using CUDA for GPU acceleration\n", __func__); + LLAMA_LOG_INFO("%s: using CUDA for GPU acceleration\n", __func__); ggml_cuda_set_main_device(main_gpu); + ggml_cuda_set_mul_mat_q(mul_mat_q); #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU_SPLIT #elif defined(GGML_USE_CLBLAST) - fprintf(stderr, "%s: using OpenCL for GPU acceleration\n", __func__); + LLAMA_LOG_INFO("%s: using OpenCL for GPU acceleration\n", __func__); #define LLAMA_BACKEND_OFFLOAD GGML_BACKEND_GPU #define LLAMA_BACKEND_OFFLOAD_SPLIT GGML_BACKEND_GPU #else @@ -1353,25 +1441,29 @@ static void llama_model_load_internal( const size_t scale = memory_type == GGML_TYPE_F32 ? 
2 : 1; // this is the total memory required to run the inference - const size_t mem_required = + size_t mem_required = ctx_size + - mmapped_size - vram_weights + // weights in VRAM not in memory + mmapped_size - vram_weights; // weights in VRAM not in memory + +#ifndef LLAMA_USE_ALLOCATOR + mem_required += MEM_REQ_SCRATCH0(hparams.n_ctx).at(model.type) + MEM_REQ_SCRATCH1().at(model.type) + MEM_REQ_EVAL().at(model.type); +#endif // this is the memory required by one llama_state const size_t mem_required_state = scale*hparams.kv_size(); - fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__, + LLAMA_LOG_INFO("%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__, mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0); (void) vram_scratch; (void) n_batch; #ifdef GGML_USE_CUBLAS if (low_vram) { - fprintf(stderr, "%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__); + LLAMA_LOG_INFO("%s: not allocating a VRAM scratch buffer due to low VRAM option\n", __func__); ggml_cuda_set_scratch_size(0); // disable scratch } else { const size_t vram_scratch_base = VRAM_REQ_SCRATCH_BASE().at(model.type); @@ -1379,7 +1471,7 @@ static void llama_model_load_internal( vram_scratch = n_batch * (vram_scratch_base + n_ctx * vram_scratch_per_context); ggml_cuda_set_scratch_size(vram_scratch); if (n_gpu_layers > 0) { - fprintf(stderr, "%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n", + LLAMA_LOG_INFO("%s: allocating batch_size x (%zd kB + n_ctx x %zd B) = %zd MB VRAM for the scratch buffer\n", __func__, vram_scratch_base / kB, vram_scratch_per_context, (vram_scratch + MB - 1) / MB); // round up } @@ -1389,9 +1481,9 @@ static void llama_model_load_internal( #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST) const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer)); - fprintf(stderr, "%s: offloading %d repeating layers to GPU\n", __func__, n_gpu); + LLAMA_LOG_INFO("%s: offloading %d repeating layers to GPU\n", __func__, n_gpu); if (n_gpu_layers > (int) hparams.n_layer) { - fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__); + LLAMA_LOG_INFO("%s: offloading non-repeating layers to GPU\n", __func__); } size_t vram_kv_cache = 0; @@ -1400,17 +1492,17 @@ static void llama_model_load_internal( const int max_offloadable_layers = low_vram ? 
hparams.n_layer + 1 : hparams.n_layer + 3; if (n_gpu_layers > (int) hparams.n_layer + 1) { if (low_vram) { - fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__); + LLAMA_LOG_INFO("%s: cannot offload v cache to GPU due to low VRAM option\n", __func__); } else { - fprintf(stderr, "%s: offloading v cache to GPU\n", __func__); + LLAMA_LOG_INFO("%s: offloading v cache to GPU\n", __func__); vram_kv_cache += hparams.kv_size() / 2; } } if (n_gpu_layers > (int) hparams.n_layer + 2) { if (low_vram) { - fprintf(stderr, "%s: cannot offload k cache to GPU due to low VRAM option\n", __func__); + LLAMA_LOG_WARN("%s: cannot offload k cache to GPU due to low VRAM option\n", __func__); } else { - fprintf(stderr, "%s: offloading k cache to GPU\n", __func__); + LLAMA_LOG_INFO("%s: offloading k cache to GPU\n", __func__); vram_kv_cache += hparams.kv_size() / 2; } } @@ -1419,9 +1511,9 @@ static void llama_model_load_internal( const int max_offloadable_layers = hparams.n_layer + 1; #endif // GGML_USE_CUBLAS - fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n", + LLAMA_LOG_INFO("%s: offloaded %d/%d layers to GPU\n", __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers); - fprintf(stderr, "%s: total VRAM used: %zu MB\n", + LLAMA_LOG_INFO("%s: total VRAM used: %zu MB\n", __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up #else (void) n_gpu_layers; @@ -1459,11 +1551,10 @@ static bool llama_model_load( llama_vocab & vocab, int n_ctx, int n_batch, - int n_gqa, - float rms_norm_eps, int n_gpu_layers, int main_gpu, const float * tensor_split, + const bool mul_mat_q, float rope_freq_base, float rope_freq_scale, bool low_vram, @@ -1474,41 +1565,25 @@ static bool llama_model_load( llama_progress_callback progress_callback, void *progress_callback_user_data) { try { - llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gqa, rms_norm_eps, n_gpu_layers, main_gpu, tensor_split, rope_freq_base, rope_freq_scale, low_vram, memory_type, + llama_model_load_internal(fname, model, vocab, n_ctx, n_batch, n_gpu_layers, + main_gpu, tensor_split, mul_mat_q, rope_freq_base, rope_freq_scale, low_vram, memory_type, use_mmap, use_mlock, vocab_only, progress_callback, progress_callback_user_data); return true; } catch (const std::exception & err) { - fprintf(stderr, "error loading model: %s\n", err.what()); + LLAMA_LOG_ERROR("error loading model: %s\n", err.what()); return false; } } -// evaluate the transformer -// -// - lctx: llama context -// - tokens: new batch of tokens to process -// - embd embeddings input -// - n_tokens number of tokens -// - n_past: the context size so far -// - n_threads: number of threads to use -// -static bool llama_eval_internal( +static struct ggml_cgraph * llama_build_graph( llama_context & lctx, const llama_token * tokens, const float * embd, int n_tokens, - int n_past, - int n_threads, - const char * cgraph_fname) { + int n_past) { GGML_ASSERT((!tokens && embd) || (tokens && !embd)); -#ifdef GGML_USE_MPI - ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads); -#endif - - const int64_t t_start_us = ggml_time_us(); - const int N = n_tokens; const auto & model = lctx.model; @@ -1524,7 +1599,6 @@ static bool llama_eval_internal( const int64_t n_head = hparams.n_head; const int64_t n_head_kv = hparams.n_head_kv; const int64_t n_embd_head = hparams.n_embd_head(); - const int64_t n_vocab = hparams.n_vocab; const int64_t n_embd_gqa = hparams.n_embd_gqa(); @@ -1539,26 +1613,35 @@ static 
bool llama_eval_internal( auto & mem_per_token = lctx.mem_per_token; auto & buf_compute = lctx.buf_compute; + struct ggml_init_params params = { /*.mem_size =*/ buf_compute.size, - /*.mem_buffer =*/ buf_compute.addr, + /*.mem_buffer =*/ buf_compute.data, /*.no_alloc =*/ false, }; +#ifdef LLAMA_USE_ALLOCATOR + params.no_alloc = true; +#endif + struct ggml_context * ctx0 = ggml_init(params); ggml_cgraph * gf = ggml_new_graph(ctx0); - // for big prompts, if BLAS is enabled, it is better to use only one thread - // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance - n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads; - struct ggml_tensor * cur; struct ggml_tensor * inpL; if (tokens) { struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N); + +#ifdef LLAMA_USE_ALLOCATOR + ggml_allocr_alloc(lctx.alloc, inp_tokens); + if (!ggml_allocr_is_measure(lctx.alloc)) { + memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens)); + } +#else memcpy(inp_tokens->data, tokens, N*ggml_element_size(inp_tokens)); +#endif ggml_set_name(inp_tokens, "inp_tokens"); inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens); @@ -1568,7 +1651,15 @@ static bool llama_eval_internal( #endif inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N); + +#ifdef LLAMA_USE_ALLOCATOR + ggml_allocr_alloc(lctx.alloc, inpL); + if (!ggml_allocr_is_measure(lctx.alloc)) { + memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL)); + } +#else memcpy(inpL->data, embd, N * n_embd * ggml_element_size(inpL)); +#endif } const int i_gpu_start = n_layer - n_gpu_layers; @@ -1595,6 +1686,17 @@ static bool llama_eval_internal( } #endif // GGML_USE_CUBLAS + struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1); +#ifdef LLAMA_USE_ALLOCATOR + ggml_allocr_alloc(lctx.alloc, KQ_scale); + if (!ggml_allocr_is_measure(lctx.alloc)) { + ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); + } +#else + ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd)/n_head)); +#endif + ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); + for (int il = 0; il < n_layer; ++il) { ggml_format_name(inpL, "layer_inp_%d", il); @@ -1690,9 +1792,6 @@ static bool llama_eval_internal( ggml_set_name(KQ, "KQ"); // KQ_scaled = KQ / sqrt(n_embd_head) - struct ggml_tensor * KQ_scale = ggml_new_f32(ctx0, 1.0f/sqrtf(float(n_embd)/n_head)); - ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)"); - // KQ_scaled shape [n_past + N, N, n_head, 1] struct ggml_tensor * KQ_scaled = ggml_scale_inplace(ctx0, KQ, KQ_scale); offload_func_kq(KQ_scaled); @@ -1808,9 +1907,6 @@ static bool llama_eval_internal( lctx.use_buf(ctx0, 0); - // used at the end to optionally extract the embeddings - struct ggml_tensor * embeddings = NULL; - // norm { cur = ggml_rms_norm(ctx0, inpL, rms_norm_eps); @@ -1821,8 +1917,6 @@ static bool llama_eval_internal( cur = ggml_mul(ctx0, cur, model.norm); // offload_func_nr(cur); // TODO CPU + GPU mirrored backend ggml_set_name(cur, "result_norm"); - - embeddings = cur; } // lm_head @@ -1834,23 +1928,103 @@ static bool llama_eval_internal( // logits -> probs //cur = ggml_soft_max_inplace(ctx0, cur); - // run the computation ggml_build_forward_expand(gf, cur); - // fprintf(stderr, "graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf.n_nodes, gf.n_leafs); + if (mem_per_token == 0) { + mem_per_token = ggml_used_mem(ctx0)/N; + } + +#if 0 + LLAMA_LOG_INFO("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f 
MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__, + ggml_used_mem(ctx0)/1024.0/1024.0, + lctx.get_buf_max_mem(0)/1024.0/1024.0, + lctx.get_buf_max_mem(1)/1024.0/1024.0, + lctx.work_buffer.size()/1024.0/1024.0, + n_past, N); +#endif + + ggml_free(ctx0); + + return gf; +} + +// evaluate the transformer +// +// - lctx: llama context +// - tokens: new batch of tokens to process +// - embd embeddings input +// - n_tokens number of tokens +// - n_past: the context size so far +// - n_threads: number of threads to use +// +static bool llama_eval_internal( + llama_context & lctx, + const llama_token * tokens, + const float * embd, + int n_tokens, + int n_past, + int n_threads, + const char * cgraph_fname) { + + GGML_ASSERT((!tokens && embd) || (tokens && !embd)); + + const int64_t t_start_us = ggml_time_us(); + +#ifdef GGML_USE_MPI + ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads); +#endif + + const int N = n_tokens; + + const auto & model = lctx.model; + const auto & hparams = model.hparams; + + const auto & kv_self = lctx.kv_self; + + GGML_ASSERT(!!kv_self.ctx); + + const int64_t n_embd = hparams.n_embd; + const int64_t n_vocab = hparams.n_vocab; + +#ifdef LLAMA_USE_ALLOCATOR + ggml_allocr_reset(lctx.alloc); +#endif + + ggml_cgraph * gf = llama_build_graph(lctx, tokens, embd, n_tokens, n_past); + +#ifdef LLAMA_USE_ALLOCATOR + ggml_allocr_alloc_graph(lctx.alloc, gf); +#endif + + // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs); + + // for big prompts, if BLAS is enabled, it is better to use only one thread + // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance + n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 
1 : n_threads; + + struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; + struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; + + GGML_ASSERT(strcmp(res->name, "result_output") == 0); + GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0); #if GGML_USE_MPI + const int64_t n_layer = hparams.n_layer; ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer); #endif #ifdef GGML_USE_METAL if (lctx.ctx_metal && N == 1) { - if (!ggml_metal_if_optimized(lctx.ctx_metal)) { - ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf); - } + // TODO: disabled until #2413 is resolved + //if (!ggml_metal_if_optimized(lctx.ctx_metal)) { + // ggml_metal_graph_find_concurrency(lctx.ctx_metal, gf); + //} ggml_metal_set_n_cb (lctx.ctx_metal, n_threads); ggml_metal_graph_compute(lctx.ctx_metal, gf); - ggml_metal_get_tensor (lctx.ctx_metal, cur); + ggml_metal_get_tensor (lctx.ctx_metal, res); + if (!lctx.embedding.empty()) { + ggml_metal_get_tensor(lctx.ctx_metal, embeddings); + } } else { // IMPORTANT: // Since we don't have efficient Matrix x Matrix Metal multiplication yet, we fallback to vanilla @@ -1881,8 +2055,6 @@ static bool llama_eval_internal( // update kv token count lctx.kv_self.n = n_past + N; - struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1]; - if (cgraph_fname) { ggml_graph_export(gf, cgraph_fname); } @@ -1920,21 +2092,6 @@ static bool llama_eval_internal( memcpy(embedding_out.data(), (float *) ggml_get_data(embeddings) + (n_embd*(N - 1)), sizeof(float)*n_embd); } - if (mem_per_token == 0) { - mem_per_token = ggml_used_mem(ctx0)/N; - } - -#if 0 - printf("\n%s: used_mem: eval ctx %.3f MB, scratch %.3f MB %.3f MB, work buf %.3f MB, n_past = %d, N = %d\n", __func__, - ggml_used_mem(ctx0)/1024.0/1024.0, - lctx.get_buf_max_mem(0)/1024.0/1024.0, - lctx.get_buf_max_mem(1)/1024.0/1024.0, - lctx.work_buffer.size()/1024.0/1024.0, - n_past, N); -#endif - - ggml_free(ctx0); - // measure the performance only for the single-token evals if (N == 1) { lctx.t_eval_us += ggml_time_us() - t_start_us; @@ -2026,7 +2183,7 @@ struct llama_tokenizer { left_sym.n += right_sym.n; right_sym.n = 0; - //printf("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size); + //LLAMA_LOG_INFO("left = '%*s' size = %zu\n", (int) left_sym.n, left_sym.text, bigram.size); // remove the right sym from the chain left_sym.next = right_sym.next; @@ -2046,7 +2203,9 @@ struct llama_tokenizer { if (token == vocab_.token_to_id.end()) { // output any symbols that did not form tokens as bytes. 
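The byte-fallback change in the lines that follow replaces the old fixed "+ 3" id offset with a direct vocab lookup. A minimal sketch of the new lookup, factored into a hypothetical helper purely for illustration (not part of the patch):

    // look up the single-byte string in the vocab; at() throws std::out_of_range
    // if the byte token is missing, instead of silently producing an id from the
    // old fixed offset (byte value + 3)
    static llama_vocab::id byte_to_token_id(const llama_vocab & vocab, char c) {
        return vocab.token_to_id.at(std::string(1, c));
    }
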
for (int j = 0; j < (int) symbol.n; ++j) { - llama_vocab::id token_id = static_cast(symbol.text[j]) + 3; + // NOTE: old version, before #2420 - not sure what are the implications of this + //llama_vocab::id token_id = static_cast(symbol.text[j]) + 3; + llama_vocab::id token_id = vocab_.token_to_id.at(std::string(1, symbol.text[j])); output.push_back(token_id); } } else { @@ -2904,11 +3063,11 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar // quantization // -static void llama_convert_tensor_internal(const gguf_load_tensor & tensor, gguf_buffer & output, const int nelements, const int nthread) { - if (output.size < nelements * sizeof(float)) { - output.resize(nelements * sizeof(float)); +static void llama_convert_tensor_internal(const gguf_load_tensor & tensor, std::vector & output, const size_t nelements, const int nthread) { + if (output.size() < nelements) { + output.resize(nelements); } - float * f32_output = (float *) output.addr; + float * f32_output = (float *) output.data(); ggml_type_traits_t qtype; if (ggml_is_quantized(tensor.type)) { @@ -3026,13 +3185,16 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s }; size_t idx = 0; + + std::vector read_data; + std::vector work; + for (gguf_load_tensor & tensor : model_loader->tensors_map.tensors) { - gguf_buffer read_data; read_data.resize(tensor.size); - tensor.data = read_data.addr; + tensor.data = read_data.data(); model_loader->load_data_for(tensor); - printf("[%4zu/%4zu] %36s - %16s, type = %6s, ", + LLAMA_LOG_INFO("[%4zu/%4zu] %36s - %16s, type = %6s, ", ++idx, model_loader->tensors_map.tensors.size(), tensor.name.c_str(), llama_format_tensor_shape(tensor.ne).c_str(), ggml_type_name(tensor.type)); @@ -3048,13 +3210,12 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s enum ggml_type new_type; void * new_data; size_t new_size; - gguf_buffer work; if (!quantize) { new_type = tensor.type; new_data = tensor.data; new_size = tensor.size; - printf("size = %8.3f MB\n", tensor.size/1024.0/1024.0); + LLAMA_LOG_INFO("size = %8.3f MB\n", tensor.size/1024.0/1024.0); } else { new_type = quantized_type; #ifdef GGML_USE_K_QUANTS @@ -3089,26 +3250,27 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s int nx = tensor.ne.at(0); int ny = tensor.ne.at(1); if (nx % QK_K != 0 || ny % QK_K != 0) { - fprintf(stderr, "\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K); + LLAMA_LOG_INFO("\n\nTensor sizes %d x %d are not divisible by %d, required for k-quants.\n",nx,ny,QK_K); convert_incompatible_tensor = true; } } if (convert_incompatible_tensor) { if (tensor.name == "output.weight") { new_type = GGML_TYPE_F16; //fall back to F16 instead of just failing. - fprintf(stderr, "F16 will be used for this tensor instead.\n"); + LLAMA_LOG_WARN("F16 will be used for this tensor instead.\n"); } else if (tensor.name == "tok_embeddings.weight") { new_type = GGML_TYPE_Q4_0; //fall back to Q4_0 instead of just failing. 
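For illustration, the k-quants fallback policy applied just above can be read as a small selector. The helper name is hypothetical and this sketch is not part of the patch:

    // tensors whose dimensions are not multiples of QK_K cannot use k-quants;
    // fall back as the code above does: keep precision for output.weight,
    // use Q4_0 for tok_embeddings.weight, and fail for anything else
    static enum ggml_type k_quant_fallback_type(const std::string & name) {
        if (name == "output.weight")         return GGML_TYPE_F16;
        if (name == "tok_embeddings.weight") return GGML_TYPE_Q4_0;
        throw std::runtime_error("Unsupported tensor size encountered\n");
    }
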
- fprintf(stderr, "Q4_0 will be used for this tensor instead.\n"); + LLAMA_LOG_WARN("Q4_0 will be used for this tensor instead.\n"); } else { throw std::runtime_error("Unsupported tensor size encountered\n"); } } #endif + const size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); + float * f32_data; - size_t nelements = tensor.ne.at(0) * tensor.ne.at(1); - gguf_buffer f32_conv_buf; + std::vector f32_conv_buf; if (tensor.type == GGML_TYPE_F32) { f32_data = (float *) tensor.data; @@ -3116,17 +3278,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s throw std::runtime_error(format("requantizing from type %s is disabled", ggml_type_name(tensor.type))); } else { llama_convert_tensor_internal(tensor, f32_conv_buf, nelements, nthread); - f32_data = (float *) f32_conv_buf.addr; + f32_data = (float *) f32_conv_buf.data(); } - printf("quantizing to %s .. ", ggml_type_name(new_type)); + LLAMA_LOG_INFO("quantizing to %s .. ", ggml_type_name(new_type)); fflush(stdout); work.resize(nelements * 4); // upper bound on size - new_data = work.addr; + new_data = work.data(); std::vector hist_cur(1 << 4, 0); - int chunk_size = 32 * 512; + const int chunk_size = 32 * 512; const int nchunk = (nelements + chunk_size - 1)/chunk_size; const int nthread_use = nthread > 1 ? std::max(1, std::min(nthread, nchunk)) : 1; if (nthread_use < 2) { @@ -3134,7 +3296,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } else { size_t counter = 0; new_size = 0; - auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements, chunk_size] () { + auto compute = [&mutex, &counter, &hist_cur, &new_size, new_type, f32_data, new_data, nelements] () { std::vector local_hist; size_t local_size = 0; while (true) { @@ -3169,7 +3331,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } } - printf("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0); + LLAMA_LOG_INFO("size = %8.2f MB -> %8.2f MB | hist: ", tensor.size/1024.0/1024.0, new_size/1024.0/1024.0); int64_t tot_count = 0; for (size_t i = 0; i < hist_cur.size(); i++) { hist_all[i] += hist_cur[i]; @@ -3178,18 +3340,18 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s if (tot_count > 0) { for (size_t i = 0; i < hist_cur.size(); i++) { - printf("%5.3f ", hist_cur[i] / float(nelements)); + LLAMA_LOG_INFO("%5.3f ", hist_cur[i] / float(nelements)); } } - printf("\n"); + LLAMA_LOG_INFO("\n"); } total_size_org += tensor.size; total_size_new += new_size; file_saver.write_tensor(tensor, new_type, new_data, new_size); } - printf("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); - printf("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); + LLAMA_LOG_INFO("%s: model size = %8.2f MB\n", __func__, total_size_org/1024.0/1024.0); + LLAMA_LOG_INFO("%s: quant size = %8.2f MB\n", __func__, total_size_new/1024.0/1024.0); { int64_t sum_all = 0; @@ -3198,17 +3360,15 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s } if (sum_all > 0) { - printf("%s: hist: ", __func__); + LLAMA_LOG_INFO("%s: hist: ", __func__); for (size_t i = 0; i < hist_all.size(); i++) { - printf("%5.3f ", hist_all[i] / float(sum_all)); + LLAMA_LOG_INFO("%5.3f ", hist_all[i] / float(sum_all)); } - printf("\n"); + LLAMA_LOG_INFO("\n"); } } } - - // // interface implementation // @@ -3222,12 +3382,12 @@ struct llama_model * llama_load_model_from_file( 
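For context, the multi-threaded quantization loop above hands out fixed-size chunks of elements under a mutex and merges per-thread results at the end. Below is a condensed, self-contained sketch of that pattern; quantize_chunked and do_chunk are hypothetical names, and the real loop additionally merges per-thread histograms (not part of the patch):

    #include <algorithm>
    #include <cstddef>
    #include <functional>
    #include <mutex>
    #include <thread>
    #include <vector>

    // do_chunk quantizes [first, first + count) and returns the bytes produced
    static size_t quantize_chunked(size_t nelements, int nthread,
                                   const std::function<size_t(size_t first, size_t count)> & do_chunk) {
        const size_t chunk_size = 32 * 512;    // same granularity as the loop above
        std::mutex mutex;
        size_t counter  = 0;                   // next element index to hand out
        size_t new_size = 0;                   // total bytes written by all workers
        auto worker = [&]() {
            size_t local_size = 0;
            for (;;) {
                size_t first;
                {
                    std::lock_guard<std::mutex> lock(mutex);
                    if (counter >= nelements) {
                        new_size += local_size; // merge this worker's result
                        return;
                    }
                    first    = counter;
                    counter += chunk_size;
                }
                local_size += do_chunk(first, std::min(chunk_size, nelements - first));
            }
        };
        std::vector<std::thread> workers;
        for (int i = 0; i < nthread - 1; ++i) {
            workers.emplace_back(worker);
        }
        worker();                              // the calling thread participates too
        for (auto & w : workers) {
            w.join();
        }
        return new_size;
    }
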
ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32; - if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gqa, params.rms_norm_eps, params.n_gpu_layers, - params.main_gpu, params.tensor_split, params.rope_freq_base, params.rope_freq_scale,params.low_vram, - memory_type, params.use_mmap, params.use_mlock, params.vocab_only, params.progress_callback, - params.progress_callback_user_data)) { + if (!llama_model_load(path_model, *model, model->vocab, params.n_ctx, params.n_batch, params.n_gpu_layers, + params.main_gpu, params.tensor_split, params.mul_mat_q, params.rope_freq_base, params.rope_freq_scale, + params.low_vram, memory_type, params.use_mmap, params.use_mlock, params.vocab_only, + params.progress_callback, params.progress_callback_user_data)) { + LLAMA_LOG_ERROR("%s: failed to load model\n", __func__); delete model; - fprintf(stderr, "%s: failed to load model\n", __func__); return nullptr; } @@ -3260,10 +3420,9 @@ struct llama_context * llama_new_context_with_model( unsigned percentage = (unsigned) (100 * progress); while (percentage > *cur_percentage_p) { *cur_percentage_p = percentage; - fprintf(stderr, "."); - fflush(stderr); + LLAMA_LOG_INFO("."); if (percentage >= 100) { - fprintf(stderr, "\n"); + LLAMA_LOG_INFO("\n"); } } }; @@ -3277,14 +3436,14 @@ struct llama_context * llama_new_context_with_model( // reserve memory for context buffers if (!params.vocab_only) { if (!kv_cache_init(ctx->model.hparams, ctx->kv_self, memory_type, ctx->model.hparams.n_ctx, params.n_gpu_layers)) { - fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__); + LLAMA_LOG_ERROR("%s: kv_cache_init() failed for self-attention cache\n", __func__); llama_free(ctx); return nullptr; } { const size_t memory_size = ggml_nbytes(ctx->kv_self.k) + ggml_nbytes(ctx->kv_self.v); - fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); + LLAMA_LOG_INFO("%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0); } const auto & hparams = ctx->model.hparams; @@ -3300,10 +3459,47 @@ struct llama_context * llama_new_context_with_model( ctx->embedding.resize(hparams.n_embd); } - ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); +#ifdef LLAMA_USE_ALLOCATOR + { + static const size_t tensor_alignment = 32; + // the compute buffer is used to store the tensor and graph structs, while the allocator buffer is used for the tensor data + ctx->buf_compute.resize(ggml_tensor_overhead()*GGML_MAX_NODES + ggml_graph_overhead()); + // create measure allocator + ctx->alloc = ggml_allocr_new_measure(tensor_alignment); + + // build worst-case graph + int n_tokens = std::min((int)hparams.n_ctx, params.n_batch); + int n_past = hparams.n_ctx - n_tokens; + llama_token token = llama_token_bos(); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph + ggml_cgraph * gf = llama_build_graph(*ctx, &token, NULL, n_tokens, n_past); + + // measure memory requirements for the graph + size_t alloc_size = ggml_allocr_alloc_graph(ctx->alloc, gf) + tensor_alignment; + + LLAMA_LOG_INFO("%s: compute buffer total size = %7.2f MB\n", __func__, (ctx->buf_compute.size + alloc_size) / 1024.0 / 1024.0); + + // debug - for comparison with scratch buffer + //size_t prev_req = + // MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type) + + // MEM_REQ_SCRATCH1().at(ctx->model.type) + + // MEM_REQ_EVAL().at(ctx->model.type); + //LLAMA_LOG_INFO("%s: 
(debug) equivalent with scratch buffer = %7.2f MB\n", __func__, prev_req / 1024.0 / 1024.0); + + // recreate allocator with exact memory requirements + ggml_allocr_free(ctx->alloc); + + ctx->buf_alloc.resize(alloc_size); + ctx->alloc = ggml_allocr_new(ctx->buf_alloc.data, ctx->buf_alloc.size, tensor_alignment); + } +#else + ctx->buf_compute.resize(MEM_REQ_EVAL().at(ctx->model.type) + ggml_graph_overhead()); +#endif + +#ifdef LLAMA_USE_SCRATCH ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0(hparams.n_ctx).at(ctx->model.type)); ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1().at(ctx->model.type)); +#endif } #ifdef GGML_USE_METAL @@ -3324,22 +3520,22 @@ struct llama_context * llama_new_context_with_model( const size_t max_size = ggml_get_max_tensor_size(ctx->model.ctx); - fprintf(stderr, "%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); + LLAMA_LOG_INFO("%s: max tensor size = %8.2f MB\n", __func__, max_size/1024.0/1024.0); -#define LLAMA_METAL_CHECK_BUF(result) \ - if (!(result)) { \ - fprintf(stderr, "%s: failed to add buffer\n", __func__); \ - llama_free(ctx); \ - return NULL; \ +#define LLAMA_METAL_CHECK_BUF(result) \ + if (!(result)) { \ + LLAMA_LOG_ERROR("%s: failed to add buffer\n", __func__); \ + llama_free(ctx); \ + return NULL; \ } LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "data", data_ptr, data_size, max_size)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.addr, ctx->buf_compute.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.addr, ctx->kv_self.buf.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "eval", ctx->buf_compute.data, ctx->buf_compute.size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "kv", ctx->kv_self.buf.data, ctx->kv_self.buf.size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].addr, ctx->buf_scratch[0].size, 0)); - LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].addr, ctx->buf_scratch[1].size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr0", ctx->buf_scratch[0].data, ctx->buf_scratch[0].size, 0)); + LLAMA_METAL_CHECK_BUF(ggml_metal_add_buffer(ctx->ctx_metal, "scr1", ctx->buf_scratch[1].data, ctx->buf_scratch[1].size, 0)); #undef LLAMA_METAL_CHECK_BUF } #endif @@ -3373,9 +3569,6 @@ struct llama_context * llama_init_from_file( } void llama_free(struct llama_context * ctx) { - if (ctx->model_owner) { - delete &ctx->model; - } delete ctx; } @@ -3387,19 +3580,19 @@ int llama_model_quantize( llama_model_quantize_internal(fname_inp, fname_out, params); return 0; } catch (const std::exception & err) { - fprintf(stderr, "%s: failed to quantize: %s\n", __func__, err.what()); + LLAMA_LOG_ERROR("%s: failed to quantize: %s\n", __func__, err.what()); return 1; } } int llama_apply_lora_from_file_internal(const struct llama_model & model, const char * path_lora, const char * path_base_model, int n_threads) { - fprintf(stderr, "%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); + LLAMA_LOG_INFO("%s: applying lora adapter from '%s' - please wait ...\n", __func__, path_lora); const int64_t t_start_lora_us = ggml_time_us(); auto fin = std::ifstream(path_lora, std::ios::binary); if (!fin) { - fprintf(stderr, "%s: failed to open '%s'\n", __func__, path_lora); + LLAMA_LOG_ERROR("%s: failed to open '%s'\n", __func__, path_lora); return 1; } @@ -3411,7 +3604,7 @@ int 
llama_apply_lora_from_file_internal(const struct llama_model & model, const fin.read((char *) &format_version, sizeof(format_version)); if (format_version != 1) { - fprintf(stderr, "%s: unsupported file version\n", __func__ ); + LLAMA_LOG_ERROR("%s: unsupported file version\n", __func__ ); return 1; } } @@ -3422,8 +3615,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const fin.read((char *) &lora_alpha, sizeof(lora_alpha)); float scaling = (float)lora_alpha / (float)lora_r; - fprintf(stderr, "%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); - + LLAMA_LOG_INFO("%s: r = %d, alpha = %d, scaling = %.2f\n", __func__, lora_r, lora_alpha, scaling); // create a temporary ggml context to store the lora tensors // todo: calculate size from biggest possible tensor @@ -3442,13 +3634,12 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const model_tensors.insert(kv); } - // load base model std::unique_ptr model_loader; ggml_context * base_ctx = NULL; - gguf_buffer base_buf; + std::vector base_buf; if (path_base_model) { - fprintf(stderr, "%s: loading base model from '%s'\n", __func__, path_base_model); + LLAMA_LOG_INFO("%s: loading base model from '%s'\n", __func__, path_base_model); model_loader.reset(new llama_model_loader(path_base_model, /*use_mmap*/ true)); size_t ctx_size; @@ -3457,8 +3648,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const base_buf.resize(ctx_size); ggml_init_params base_params; - base_params.mem_size = base_buf.size; - base_params.mem_buffer = base_buf.addr; + base_params.mem_size = base_buf.size(); + base_params.mem_buffer = base_buf.data(); base_params.no_alloc = model_loader->use_mmap; base_ctx = ggml_init(base_params); @@ -3505,17 +3696,17 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const const std::string lora_suffix = ".lora"; size_t pos = name.rfind(lora_suffix); if (pos == std::string::npos) { - fprintf(stderr, "%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); + LLAMA_LOG_ERROR("%s: error: '%s' is not a lora tensor\n", __func__, name.c_str()); return 1; } std::string lora_type = name.substr(pos + lora_suffix.length()); std::string base_name = name; base_name.erase(pos); - // fprintf(stderr, "%s: %s => %s (lora type %s) ", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); + // LLAMA_LOG_INFO("%s: %s => %s (lora type %s) \n", __func__, name.c_str(),base_name.c_str(), lora_type.c_str()); if (model_tensors.find(base_name) == model_tensors.end()) { - fprintf(stderr, "%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); + LLAMA_LOG_ERROR("%s: unknown tensor '%s' in lora adapter\n", __func__, name.data()); return 1; } @@ -3526,7 +3717,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const case 1: wtype = GGML_TYPE_F16; break; default: { - fprintf(stderr, "%s: invalid tensor data type '%d'\n", + LLAMA_LOG_ERROR("%s: invalid tensor data type '%d'\n", __func__, ftype); return false; } @@ -3536,7 +3727,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const lora_tensor = ggml_new_tensor_2d(lora_ctx, wtype, ne[0], ne[1]); } else { - fprintf(stderr, "%s: unsupported tensor dimension %d\n", __func__, n_dims); + LLAMA_LOG_ERROR("%s: unsupported tensor dimension %d\n", __func__, n_dims); return 1; } ggml_set_name(lora_tensor, "lora_tensor"); @@ -3574,7 +3765,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & 
             if (model_loader) {
                 // load from base model
                 if (model_loader->tensors_map.name_to_idx.find(base_name) == model_loader->tensors_map.name_to_idx.end()) {
-                    fprintf(stderr, "%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
+                    LLAMA_LOG_ERROR("%s: error: tensor '%s' not found in base model\n", __func__, base_name.c_str());
                     return 1;
                 }
                 size_t idx = model_loader->tensors_map.name_to_idx[base_name];
@@ -3590,8 +3781,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             if (ggml_is_quantized(base_t->type)) {
                 if (!warned) {
-                    fprintf(stderr, "%s: warning: using a lora adapter with a quantized model may result in poor quality, "
-                                    "use a f16 or f32 base model with --lora-base\n", __func__);
+                    LLAMA_LOG_WARN("%s: warning: using a lora adapter with a quantized model may result in poor quality, "
+                                   "use a f16 or f32 base model with --lora-base\n", __func__);
                     warned = true;
                 }
             }
@@ -3605,8 +3796,8 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             ggml_set_name(loraB, "loraB");
 
             if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
-                fprintf(stderr, "%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
-                                " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
+                LLAMA_LOG_ERROR("%s: incompatible tensor dimensions (%" PRId64 " and %" PRId64 ");"
+                                " are you sure that this adapter is for this model?\n", __func__, base_t->ne[0], loraA->ne[1]);
                 return 1;
             }
@@ -3651,7 +3842,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
             n_tensors++;
             if (n_tensors % 4 == 0) {
-                fprintf(stderr, ".");
+                LLAMA_LOG_INFO(".");
             }
         }
@@ -3663,7 +3854,7 @@ int llama_apply_lora_from_file_internal(const struct llama_model & model, const
     }
 
     const int64_t t_lora_us = ggml_time_us() - t_start_lora_us;
-    fprintf(stderr, " done (%.2f ms)\n", t_lora_us / 1000.0);
+    LLAMA_LOG_INFO(" done (%.2f ms)\n", t_lora_us / 1000.0);
 
     return 0;
 }
@@ -3672,7 +3863,7 @@ int llama_apply_lora_from_file(struct llama_context * ctx, const char * path_lor
     try {
         return llama_apply_lora_from_file_internal(ctx->model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
         return 1;
     }
 }
@@ -3681,7 +3872,7 @@ int llama_model_apply_lora_from_file(const struct llama_model * model, const cha
     try {
         return llama_apply_lora_from_file_internal(*model, path_lora, path_base_model, n_threads);
     } catch (const std::exception & err) {
-        fprintf(stderr, "%s: failed to apply lora adapter: %s\n", __func__, err.what());
+        LLAMA_LOG_ERROR("%s: failed to apply lora adapter: %s\n", __func__, err.what());
         return 1;
     }
 }
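The two wrappers above are the public entry points for LoRA application. A minimal sketch of how a caller might drive them, assuming an already-created llama_context; the helper name and file paths below are illustrative and not part of this patch:

    #include <cstdio>
    #include "gguf-llama.h"

    // Illustrative helper: apply a LoRA adapter to an existing context.
    // Pass a f16/f32 base model path to avoid the quantized-base warning above,
    // or NULL to patch the loaded weights directly.
    static bool apply_adapter(struct llama_context * ctx, int n_threads) {
        const char * path_lora = "lora-adapter.bin";    // placeholder path
        const char * path_base = "ggml-model-f16.bin";  // placeholder path, may be NULL
        // returns 0 on success; failures are reported through LLAMA_LOG_ERROR
        return llama_apply_lora_from_file(ctx, path_lora, path_base, n_threads) == 0;
    }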
src, size); + ptr += size; + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +struct llama_data_file_context : llama_data_context { + FILE * file; + size_t size_written = 0; + + llama_data_file_context(FILE * f) : file(f) {} + + void write(const void * src, size_t size) override { + fwrite(src, size, 1, file); + size_written += size; + } + + size_t get_size_written() override { + return size_written; + } +}; + +/** copy state data into either a buffer or file depending on the passed in context + * + * file context: + * llama_file file("/path", "wb"); + * llama_data_file_context data_ctx(&file); + * llama_copy_state_data(ctx, &data_ctx); + * + * buffer context: + * std::vector buf(max_size, 0); + * llama_data_buffer_context data_ctx(&buf.data()); + * llama_copy_state_data(ctx, &data_ctx); + * +*/ +void llama_copy_state_data_internal(struct llama_context * ctx, llama_data_context * data_ctx) { // copy rng { std::stringstream rng_ss; @@ -3745,8 +3986,8 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { memset(&rng_buf[0], 0, LLAMA_MAX_RNG_STATE); memcpy(&rng_buf[0], rng_ss.str().data(), rng_ss.str().size()); - memcpy(out, &rng_size, sizeof(rng_size)); out += sizeof(rng_size); - memcpy(out, &rng_buf[0], LLAMA_MAX_RNG_STATE); out += LLAMA_MAX_RNG_STATE; + data_ctx->write(&rng_size, sizeof(rng_size)); + data_ctx->write(&rng_buf[0], LLAMA_MAX_RNG_STATE); } // copy logits @@ -3754,25 +3995,29 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { const size_t logits_cap = ctx->logits.capacity(); const size_t logits_size = ctx->logits.size(); - memcpy(out, &logits_cap, sizeof(logits_cap)); out += sizeof(logits_cap); - memcpy(out, &logits_size, sizeof(logits_size)); out += sizeof(logits_size); + data_ctx->write(&logits_cap, sizeof(logits_cap)); + data_ctx->write(&logits_size, sizeof(logits_size)); if (logits_size) { - memcpy(out, ctx->logits.data(), logits_size * sizeof(float)); + data_ctx->write(ctx->logits.data(), logits_size * sizeof(float)); } - out += logits_cap * sizeof(float); + // If there is a gap between the size and the capacity, write padding + size_t padding_size = (logits_cap - logits_size) * sizeof(float); + if (padding_size > 0) { + std::vector padding(padding_size, 0); // Create a buffer filled with zeros + data_ctx->write(padding.data(), padding_size); + } } // copy embeddings { const size_t embedding_size = ctx->embedding.size(); - memcpy(out, &embedding_size, sizeof(embedding_size)); out += sizeof(embedding_size); + data_ctx->write(&embedding_size, sizeof(embedding_size)); if (embedding_size) { - memcpy(out, ctx->embedding.data(), embedding_size * sizeof(float)); - out += embedding_size * sizeof(float); + data_ctx->write(ctx->embedding.data(), embedding_size * sizeof(float)); } } @@ -3781,14 +4026,14 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) { const auto & kv_self = ctx->kv_self; const auto & hparams = ctx->model.hparams; const int n_layer = hparams.n_layer; - const int n_embd = hparams.n_embd; + const int n_embd = hparams.n_embd_gqa(); const int n_ctx = hparams.n_ctx; const size_t kv_size = kv_self.buf.size; const int kv_ntok = llama_get_kv_cache_token_count(ctx); - memcpy(out, &kv_size, sizeof(kv_size)); out += sizeof(kv_size); - memcpy(out, &kv_ntok, sizeof(kv_ntok)); out += sizeof(kv_ntok); + data_ctx->write(&kv_size, sizeof(kv_size)); + data_ctx->write(&kv_ntok, sizeof(kv_ntok)); if (kv_size) { const size_t elt_size = 
@@ -3797,12 +4042,12 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
             ggml_cgraph gf{};
 
             ggml_tensor * kout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.k->type, n_embd, kv_ntok, n_layer);
-            kout3d->data = out;
-            out += ggml_nbytes(kout3d);
+            std::vector<uint8_t> kout3d_data(ggml_nbytes(kout3d), 0);
+            kout3d->data = kout3d_data.data();
 
             ggml_tensor * vout3d = ggml_new_tensor_3d(cpy_ctx, kv_self.v->type, kv_ntok, n_embd, n_layer);
-            vout3d->data = out;
-            out += ggml_nbytes(vout3d);
+            std::vector<uint8_t> vout3d_data(ggml_nbytes(vout3d), 0);
+            vout3d->data = vout3d_data.data();
 
             ggml_tensor * k3d = ggml_view_3d(cpy_ctx, kv_self.k,
                 n_embd, kv_ntok, n_layer,
@@ -3817,15 +4062,20 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
             ggml_graph_compute_helper(ctx->work_buffer, &gf, /*n_threads*/ 1);
 
             ggml_free(cpy_ctx);
+
+            // our data is now in the kout3d_data and vout3d_data buffers
+            // write them to file
+            data_ctx->write(kout3d_data.data(), kout3d_data.size());
+            data_ctx->write(vout3d_data.data(), vout3d_data.size());
         }
     }
+}
 
-    const size_t written  = out - dst;
-    const size_t max_size = llama_get_state_size(ctx);
+size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
+    llama_data_buffer_context data_ctx(dst);
+    llama_copy_state_data_internal(ctx, &data_ctx);
 
-    GGML_ASSERT(written <= max_size);
-
-    return written;
+    return data_ctx.get_size_written();
 }
 
 // Sets the state reading from the specified source address
@@ -3884,7 +4134,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
     const auto & kv_self = ctx->kv_self;
     const auto & hparams = ctx->model.hparams;
     const int    n_layer = hparams.n_layer;
-    const int    n_embd  = hparams.n_embd;
+    const int    n_embd  = hparams.n_embd_gqa();
     const int    n_ctx   = hparams.n_ctx;
 
     size_t kv_size;
@@ -3952,7 +4202,7 @@ bool llama_load_session_file(struct llama_context * ctx, const char * path_sessi
     try {
         return llama_load_session_file_internal(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out);
     } catch (const std::exception & err) {
-        fprintf(stderr, "error loading session file: %s\n", err.what());
+        LLAMA_LOG_ERROR("error loading session file: %s\n", err.what());
         return false;
     }
 }
@@ -3975,7 +4225,7 @@ int llama_eval(
                          int   n_past,
                          int   n_threads) {
     if (!llama_eval_internal(*ctx, tokens, nullptr, n_tokens, n_past, n_threads, nullptr)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -3997,7 +4247,7 @@ int llama_eval_embd(
                            int   n_past,
                            int   n_threads) {
     if (!llama_eval_internal(*ctx, nullptr, embd, n_tokens, n_past, n_threads, nullptr)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -4018,7 +4268,7 @@ int llama_eval_export(struct llama_context * ctx, const char * fname) {
     const std::vector<llama_token> tmp(n_batch, llama_token_bos());
 
     if (!llama_eval_internal(*ctx, tmp.data(), nullptr, tmp.size(), n_ctx, 1, fname)) {
-        fprintf(stderr, "%s: failed to eval\n", __func__);
+        LLAMA_LOG_ERROR("%s: failed to eval\n", __func__);
         return 1;
     }
@@ -4034,7 +4284,7 @@ int llama_tokenize_with_model(
     auto res = llama_tokenize(model->vocab, text, add_bos);
 
     if (n_max_tokens < (int) res.size()) {
-        fprintf(stderr, "%s: too many tokens\n", __func__);
+        LLAMA_LOG_ERROR("%s: too many tokens\n", __func__);
         return -((int) res.size());
     }
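The public signatures of llama_get_state_size(), llama_copy_state_data() and llama_set_state_data() are unchanged by the refactor above, so the usual buffer round trip still applies; a minimal sketch (the helper names are illustrative):

    #include <vector>
    #include "gguf-llama.h"

    // Snapshot the full context state into a byte buffer.
    static std::vector<uint8_t> snapshot_state(struct llama_context * ctx) {
        std::vector<uint8_t> buf(llama_get_state_size(ctx)); // upper bound on the state size
        const size_t written = llama_copy_state_data(ctx, buf.data());
        buf.resize(written);                                  // shrink to the bytes actually written
        return buf;
    }

    // Restore a previously captured snapshot into the same (or a compatible) context.
    static void restore_state(struct llama_context * ctx, std::vector<uint8_t> & snapshot) {
        llama_set_state_data(ctx, snapshot.data());
    }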
@@ -4151,15 +4401,15 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) {
 void llama_print_timings(struct llama_context * ctx) {
     const llama_timings timings = llama_get_timings(ctx);
 
-    fprintf(stderr, "\n");
-    fprintf(stderr, "%s:        load time = %8.2f ms\n", __func__, timings.t_load_ms);
-    fprintf(stderr, "%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("\n");
+    LLAMA_LOG_INFO("%s:        load time = %8.2f ms\n", __func__, timings.t_load_ms);
+    LLAMA_LOG_INFO("%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-    fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-    fprintf(stderr, "%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    fprintf(stderr, "%s:       total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
+    LLAMA_LOG_INFO("%s:       total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }
 
 void llama_reset_timings(struct llama_context * ctx) {
@@ -4195,3 +4445,44 @@ const char * llama_print_system_info(void) {
 const std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx) {
     return ctx->model.tensors_by_name;
 }
+
+
+void llama_log_set(llama_log_callback log_callback, void * user_data) {
+    g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
+    g_state.log_callback_user_data = user_data;
+}
+
+#if defined(_MSC_VER) && !defined(vsnprintf)
+#define vsnprintf _vsnprintf
+#endif
+
+static void llama_log_internal_v(llama_log_level level, const char * format, va_list args) {
+    va_list args_copy;
+    va_copy(args_copy, args);
+    char buffer[128];
+    int len = vsnprintf(buffer, 128, format, args);
+    if (len < 128) {
+        g_state.log_callback(level, buffer, g_state.log_callback_user_data);
+    } else {
+        char* buffer2 = new char[len+1];
+        vsnprintf(buffer2, len+1, format, args_copy);
+        buffer2[len] = 0;
+        g_state.log_callback(level, buffer2, g_state.log_callback_user_data);
+        delete[] buffer2;
+    }
+    va_end(args_copy);
+}
+
+static void llama_log_internal(llama_log_level level, const char * format, ...) {
+    va_list args;
+    va_start(args, format);
+    llama_log_internal_v(level, format, args);
+    va_end(args);
+}
+
+static void llama_log_callback_default(llama_log_level level, const char * text, void * user_data) {
+    (void) level;
+    (void) user_data;
+    fputs(text, stderr);
+    fflush(stderr);
+}
diff --git a/gguf-llama.h b/gguf-llama.h
index 540167bd1..a8ed69d91 100644
--- a/gguf-llama.h
+++ b/gguf-llama.h
@@ -41,10 +41,6 @@
 #define LLAMA_SUPPORTS_GPU_OFFLOAD
 #endif
 
-#ifndef LLAMA_DEFAULT_RMS_EPS
-#define LLAMA_DEFAULT_RMS_EPS 5e-6f
-#endif
-
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -74,12 +70,23 @@ extern "C" {
 
     typedef void (*llama_progress_callback)(float progress, void *ctx);
 
-    struct llama_context_params {
+    enum llama_log_level {
+        LLAMA_LOG_LEVEL_ERROR = 2,
+        LLAMA_LOG_LEVEL_WARN  = 3,
+        LLAMA_LOG_LEVEL_INFO  = 4
+    };
+
+    // Signature for logging events
+    // Note that text includes the new line character at the end for most events.
+    // If your logging mechanism cannot handle that, check if the last character is '\n' and strip it
+    // if it exists.
+    // It might not exist for progress report where '.' is output repeatedly.
+    typedef void (*llama_log_callback)(enum llama_log_level level, const char * text, void * user_data);
+
+    struct llama_context_params {
         uint32_t seed;         // RNG seed, -1 for random
         int32_t  n_ctx;        // text context
         int32_t  n_batch;      // prompt processing batch size
-        int32_t  n_gqa;        // grouped-query attention (TEMP - will be moved to model hparams)
-        float    rms_norm_eps; // rms norm epsilon (TEMP - will be moved to model hparams)
         int32_t  n_gpu_layers; // number of layers to store in VRAM
         int32_t  main_gpu;     // the GPU that is used for scratch and small tensors
@@ -96,6 +103,7 @@ extern "C" {
 
         // Keep the booleans together to avoid misalignment during copy-by-value.
         bool low_vram;   // if true, reduce VRAM usage at the cost of performance
+        bool mul_mat_q;  // if true, use experimental mul_mat_q kernels
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
@@ -129,7 +137,7 @@ extern "C" {
     // model quantization parameters
     typedef struct llama_model_quantize_params {
         int nthread;                 // number of threads to use for quantizing, if <=0 will use std::thread::hardware_concurrency()
-        enum llama_ftype   ftype;    // quantize to this llama_ftype
+        enum llama_ftype ftype;      // quantize to this llama_ftype
         bool allow_requantize;       // allow quantizing non-f32/f16 tensors
         bool quantize_output_tensor; // quantize output.weight
     } llama_model_quantize_params;
@@ -182,6 +190,10 @@ extern "C" {
         int32_t n_eval;
     };
 
+    // Set callback for all future logging events.
+    // If this is not called, or NULL is supplied, everything is output on stderr.
+    LLAMA_API void llama_log_set(llama_log_callback log_callback, void * user_data);
+
     LLAMA_API int llama_max_devices();
 
     LLAMA_API struct llama_context_params llama_context_default_params();
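The logging hook declared above can be consumed as follows; a minimal sketch in which the callback name and the filtering threshold are illustrative:

    #include <cstdio>
    #include "gguf-llama.h"

    // Route llama logging through a custom callback installed with llama_log_set().
    static void my_log_callback(enum llama_log_level level, const char * text, void * user_data) {
        (void) user_data;
        if (level > LLAMA_LOG_LEVEL_WARN) {
            return; // drop INFO messages (larger enum value == less severe)
        }
        fputs(text, stderr); // text normally already ends with '\n' (see the note above)
        fflush(stderr);
    }

    // during start-up:
    //     llama_log_set(my_log_callback, NULL);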
diff --git a/gguf-util.h b/gguf-util.h
index 774ae57ee..d8557d94f 100644
--- a/gguf-util.h
+++ b/gguf-util.h
@@ -64,13 +64,6 @@ static std::string format(const char * fmt, ...) {
     return std::string(buf.data(), size);
 }
 
-template<typename T>
-static std::string to_string(const T & val) {
-    std::stringstream ss;
-    ss << val;
-    return ss.str();
-}
-
 // TODO: can we merge this one and gguf_context?
 struct gguf_file {
     // use FILE * so we don't have to re-open the file to mmap
@@ -474,94 +467,4 @@ struct gguf_mlock {
 #endif
 };
 
-// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
-struct gguf_buffer {
-    uint8_t * addr = NULL;
-    size_t size = 0;
-
-    gguf_buffer() = default;
-
-    void resize(size_t len) {
-#ifdef GGML_USE_METAL
-        free(addr);
-        int result = posix_memalign((void **) &addr, getpagesize(), len);
-        if (result == 0) {
-            memset(addr, 0, len);
-        }
-        else {
-            addr = NULL;
-        }
-#else
-        delete[] addr;
-        addr = new uint8_t[len];
-#endif
-        size = len;
-    }
-
-    ~gguf_buffer() {
-#ifdef GGML_USE_METAL
-        free(addr);
-#else
-        delete[] addr;
-#endif
-        addr = NULL;
-    }
-
-    // disable copy and move
-    gguf_buffer(const gguf_buffer&) = delete;
-    gguf_buffer(gguf_buffer&&) = delete;
-    gguf_buffer& operator=(const gguf_buffer&) = delete;
-    gguf_buffer& operator=(gguf_buffer&&) = delete;
-};
-
-#ifdef GGML_USE_CUBLAS
-#include "ggml-cuda.h"
-struct gguf_ctx_buffer {
-    uint8_t * addr = NULL;
-    bool is_cuda;
-    size_t size = 0;
-
-    gguf_ctx_buffer() = default;
-
-    void resize(size_t size) {
-        free();
-
-        addr = (uint8_t *) ggml_cuda_host_malloc(size);
-        if (addr) {
-            is_cuda = true;
-        }
-        else {
-            // fall back to pageable memory
-            addr = new uint8_t[size];
-            is_cuda = false;
-        }
-        this->size = size;
-    }
-
-    void free() {
-        if (addr) {
-            if (is_cuda) {
-                ggml_cuda_host_free(addr);
-            }
-            else {
-                delete[] addr;
-            }
-        }
-        addr = NULL;
-    }
-
-    ~gguf_ctx_buffer() {
-        free();
-    }
-
-    // disable copy and move
-    gguf_ctx_buffer(const gguf_ctx_buffer&) = delete;
-    gguf_ctx_buffer(gguf_ctx_buffer&&) = delete;
-    gguf_ctx_buffer& operator=(const gguf_ctx_buffer&) = delete;
-    gguf_ctx_buffer& operator=(gguf_ctx_buffer&&) = delete;
-};
-#else
-typedef gguf_buffer gguf_ctx_buffer;
-#endif
-
 #endif