Add mmap support for model files
This commit is contained in:
parent 3bcc129ba8
commit c03ae8dca1
ggml.c (7 changed lines)
@@ -2529,6 +2529,7 @@ struct ggml_context {
     void * mem_buffer;
     bool mem_buffer_owned;
     bool mem_buffer_mlocked;
+    bool no_alloc;

     int n_objects;

@@ -2815,6 +2816,7 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
         /*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
         /*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
         /*.mem_buffer_mlocked =*/ false,
+        /*.no_alloc =*/ params.no_alloc,
         /*.n_objects =*/ 0,
         /*.objects_begin =*/ NULL,
         /*.objects_end =*/ NULL,
@@ -2930,7 +2932,7 @@ struct ggml_tensor * ggml_new_tensor_impl(

     size_t size_needed = 0;

-    if (data == NULL) {
+    if (data == NULL && !ctx->no_alloc) {
         size_needed += GGML_TYPE_SIZE[type]*(ne[0]/GGML_BLCK_SIZE[type]);
         for (int i = 1; i < n_dims; i++) {
             size_needed *= ne[i];
@@ -3014,7 +3016,7 @@ struct ggml_tensor * ggml_new_tensor_impl(
         /*.perf_runs =*/ 0,
         /*.perf_cycles =*/ 0,
         /*.perf_time_us =*/ 0,
-        /*.data =*/ data == NULL ? (void *)(result + 1) : data,
+        /*.data =*/ (data == NULL && !ctx->no_alloc) ? (void *)(result + 1) : data,
         /*.pad =*/ { 0 },
     };

@@ -10277,6 +10279,7 @@ enum ggml_opt_result ggml_opt(
         struct ggml_init_params params_ctx = {
             .mem_size   = 16*1024*1024,
             .mem_buffer = NULL,
+            .no_alloc   = false,
         };

         ctx = ggml_init(params_ctx);
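The ggml.c changes above add the `no_alloc` escape hatch: a context created with `no_alloc = true` still lays out tensor objects and their metadata in its memory pool, but leaves `tensor->data` NULL so the caller can point it at storage it manages itself. A minimal sketch of that usage, assuming the ggml API at this revision (`ggml_init`, `ggml_new_tensor_1d`, `ggml_nbytes`, `ggml_free`); the pool size and tensor shape below are arbitrary:

// sketch_no_alloc.cpp -- illustrative only, not part of this commit
#include <cstdlib>
#include <cstring>
#include "ggml.h"

int main() {
    // small pool: with no_alloc it only has to hold tensor/object metadata
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024,  // arbitrary size for this sketch
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true,     // don't allocate memory for the tensor data
    };
    struct ggml_context * ctx = ggml_init(params);

    // the tensor object and shape are created, but tensor->data stays NULL
    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1024);

    // the caller supplies the storage, e.g. a region of an mmap'd model file
    void * external = malloc(ggml_nbytes(t));
    memset(external, 0, ggml_nbytes(t));
    t->data = external;

    ggml_free(ctx);
    free(external);
    return 0;
}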
ggml.h (1 changed line)
@@ -316,6 +316,7 @@ struct ggml_init_params {
         // memory pool
         size_t mem_size;   // bytes
         void * mem_buffer; // if NULL, memory will be allocated internally
+        bool no_alloc;   // don't allocate memory for the tensor data
     };

     void ggml_time_init(void); // call this once at the beginning of the program
llama.cpp (60 changed lines)
@@ -12,6 +12,13 @@
 #include <cassert>
 #include <cstring>

+// headers for POSIX mmap
+#if defined (__unix__) || defined (__APPLE__)
+#   include <sys/mman.h>
+#   include <fcntl.h>
+#   include <unistd.h>
+#endif
+
 #define LLAMA_USE_SCRATCH
 #define LLAMA_MAX_SCRATCH_BUFFERS 16

@@ -246,6 +253,7 @@ static bool kv_cache_init(
     struct ggml_init_params params;
     params.mem_size   = cache.buf.size();
     params.mem_buffer = cache.buf.data();
+    params.no_alloc   = false;

     cache.ctx = ggml_init(params);

@@ -288,6 +296,26 @@ struct llama_context_params llama_context_default_params() {
 // model loading
 //

+void * mmap_file(const char* fname) {
+#if defined(MAP_FAILED)
+    // POSIX mmap
+    int fd = open(fname, O_RDONLY);
+    size_t len = lseek(fd, 0, SEEK_END);
+    void * mm_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
+    if (mm_addr == MAP_FAILED) {
+        perror("mmap failed");
+        mm_addr = NULL;
+    }
+    close(fd);
+    return mm_addr;
+#else
+    // TODO: windows support
+    (void)(fname); // suppress warnings
+    return NULL;
+#endif
+}
+
 static bool llama_model_load(
         const std::string & fname,
         llama_context & lctx,
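For reference, a self-contained sketch of the same POSIX mapping pattern with the checks spelled out that `mmap_file` above skips (`open` failure, zero length) and with an explicit `munmap`, which the commit leaves to process teardown. This is an illustration under those assumptions, not code from the commit:

// sketch_mmap.cpp -- illustrative only
#include <cstdio>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    const int fd = open(argv[1], O_RDONLY);
    if (fd == -1) {
        perror("open");
        return 1;
    }

    const off_t len = lseek(fd, 0, SEEK_END);
    if (len <= 0) {
        perror("lseek");
        close(fd);
        return 1;
    }

    void * addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    close(fd); // the mapping stays valid after the descriptor is closed
    if (addr == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    // read-only view of the whole file; pages are faulted in on demand
    printf("mapped %lld bytes, first byte = 0x%02x\n",
           (long long) len, ((const unsigned char *) addr)[0]);

    munmap(addr, len);
    return 0;
}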
@@ -303,6 +331,7 @@ static bool llama_model_load(

     lctx.t_start_us = t_start_us;

+    // TODO: this could probably be smaller when using mmap
     std::vector<char> f_buf(1024*1024);

     auto & model = lctx.model;
@@ -449,18 +478,30 @@ static bool llama_model_load(
         }
     }

+    bool use_mmap = (n_parts == 1);
+
+    // try to memory map the model file
+    void* mm_addr = NULL;
+    if (use_mmap) {
+        mm_addr = mmap_file(fname.c_str());
+        if (mm_addr == NULL) {
+            use_mmap = false;
+        }
+    }
+
     auto & ctx = model.ctx;

     size_t ctx_size = 0;

     {
         const auto & hparams = model.hparams;

         const int n_embd  = hparams.n_embd;
         const int n_layer = hparams.n_layer;
-        const int n_ctx   = hparams.n_ctx;
         const int n_vocab = hparams.n_vocab;

+        if (!use_mmap) {
         ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings

         ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm
@@ -479,9 +520,7 @@ static bool llama_model_load(
         ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
         ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
         ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
-
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
-        ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
+        }

         ctx_size += (5 + 10*n_layer)*256; // object overhead

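A rough, back-of-the-envelope illustration of why the size accounting above is gated on `use_mmap`: when the weights stay in the mapped file, only the per-object overhead still has to be reserved in the ggml pool. The 7B-like hyperparameters below (`n_embd = 4096`, `n_layer = 32`, `n_vocab = 32000`) are assumptions for the sake of the arithmetic, not values read from a model file:

// ctx_size_sketch.cpp -- illustrative arithmetic only
#include <cstdio>
#include <cstddef>

int main() {
    // assumed 7B-like shapes, F16 weights (2 bytes per element)
    const size_t n_embd  = 4096;
    const size_t n_layer = 32;
    const size_t n_vocab = 32000;
    const size_t f16     = 2;

    // just the token-embedding term that the '!use_mmap' branch counts
    const size_t tok_embeddings = n_embd*n_vocab*f16;    // ~250 MiB
    // object overhead is reserved whether or not the file is mapped
    const size_t overhead       = (5 + 10*n_layer)*256;  // ~81 KiB

    printf("tok_embeddings bytes: %zu\n", tok_embeddings); // 262144000
    printf("object overhead:      %zu\n", overhead);       // 83200
    return 0;
}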
@@ -514,6 +553,7 @@ static bool llama_model_load(
         struct ggml_init_params params = {
             /*.mem_size   =*/ lctx.model.buf.size(),
             /*.mem_buffer =*/ lctx.model.buf.data(),
+            /*.no_alloc   =*/ use_mmap,
         };

         model.ctx = ggml_init(params);
@@ -595,7 +635,7 @@ static bool llama_model_load(
             fname_part += "." + std::to_string(i);
         }

-        fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());
+        fprintf(stderr, "%s: loading model part %d/%d from '%s'%s\n", __func__, i+1, n_parts, fname_part.c_str(), use_mmap ? " (memory mapped)" : "");

         fin = std::ifstream(fname_part, std::ios::binary);
         fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
@@ -736,7 +776,14 @@ static bool llama_model_load(
                 }

                 if (part_id == 0) {
+                    if (mm_addr) {
+                        off_t offset = fin.tellg();
+                        tensor->data = (char *) mm_addr + offset;
+                        fin.seekg(ggml_nbytes(tensor), std::ios::cur);
+                    }
+                    else {
                     fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
+                    }
                 } else {
                     fin.seekg(ggml_nbytes(tensor), std::ios::cur);
                 }
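The hunk above relies on a simple equivalence: the bytes at file offset `off` are the same whether they are read through the `ifstream` or addressed as `base + off` inside a read-only mapping, so assigning `tensor->data` a pointer into the mapping skips the copy entirely. A small sketch of that equivalence, with a placeholder file name, offset, and size, and with error handling omitted for brevity:

// sketch_offset_view.cpp -- illustrative only
#include <cassert>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <vector>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int main() {
    const char * fname = "model.bin"; // placeholder path
    const size_t off   = 128;         // placeholder payload offset
    const size_t n     = 64;          // placeholder payload size

    // view 1: conventional buffered read into a copy
    std::vector<char> copied(n);
    std::ifstream fin(fname, std::ios::binary);
    fin.seekg(off);
    fin.read(copied.data(), n);

    // view 2: pointer into a read-only mapping of the same file
    int fd = open(fname, O_RDONLY);
    size_t len = lseek(fd, 0, SEEK_END);
    const char * base = (const char *) mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
    close(fd);

    // same bytes, but the second view never copied them
    assert(memcmp(copied.data(), base + off, n) == 0);
    printf("offset view matches buffered read\n");

    munmap((void *) base, len);
    return 0;
}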
@@ -849,6 +896,7 @@ static bool llama_eval_internal(
     struct ggml_init_params params = {
         /*.mem_size   =*/ buf_compute.size(),
         /*.mem_buffer =*/ buf_compute.data(),
+        /*.no_alloc   =*/ false,
     };

     struct ggml_context * ctx0 = ggml_init(params);