mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 03:14:35 +00:00
train-text-from-scratch : fix assert failure in ggml-alloc (#3618)
This commit is contained in:
parent
e74c705e15
commit
a5e8c1d8c7
@@ -253,13 +253,14 @@ static void init_model(struct my_llama_model * model) {
     set_param_model(model);
 
     // measure data size
-    struct ggml_allocr * alloc = NULL;
-    alloc = ggml_allocr_new_measure(tensor_alignment);
-    alloc_model(alloc, model);
+    size_t size = 0;
+    for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
+        size += GGML_PAD(ggml_nbytes(t), tensor_alignment);
+    }
 
     // allocate data
-    model->data.resize(ggml_allocr_max_size(alloc) + tensor_alignment);
-    ggml_allocr_free(alloc);
+    struct ggml_allocr * alloc = NULL;
+    model->data.resize(size + tensor_alignment);
     alloc = ggml_allocr_new(model->data.data(), model->data.size(), tensor_alignment);
     alloc_model(alloc, model);
     ggml_allocr_free(alloc);
@@ -1094,11 +1095,9 @@ int main(int argc, char ** argv) {
     struct ggml_tensor * target_probs = ggml_new_tensor_3d(ctx_input, GGML_TYPE_F32, n_vocab, n_tokens, n_batch);
 
     // measure required memory for input tensors
-    alloc = ggml_allocr_new_measure(tensor_alignment);
-    ggml_allocr_alloc(alloc, tokens_input);
-    ggml_allocr_alloc(alloc, target_probs);
-    size_t max_input_size = ggml_allocr_max_size(alloc) + tensor_alignment;
-    ggml_allocr_free(alloc);
+    size_t max_input_size = GGML_PAD(ggml_nbytes(tokens_input), tensor_alignment) +
+                            GGML_PAD(ggml_nbytes(target_probs), tensor_alignment) +
+                            tensor_alignment;
     printf("%s: input_size = %zu bytes (%.1f MB)\n", __func__, max_input_size, (float) max_input_size / (1024.0f*1024.0f));
 
     // allocate input tensors
|
Loading…
Reference in New Issue
Block a user