Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-28 12:24:35 +00:00)
gguf : start implementing libllama in GGUF (WIP)

commit 4f865181aa
parent 1c4d8bf981
@@ -565,7 +565,10 @@ struct ggml_context * ctx_data = NULL;
             uint32_t n_dims = cur->n_dims;
             tensor.type = cur->type;
             tensor.ne.resize(n_dims);
-            memcpy(tensor.ne.data(), &cur->ne[0], sizeof(tensor.ne[0]) * n_dims);
+            for (uint32_t j = 0; j < n_dims; ++j) {
+                tensor.ne[j] = cur->ne[j];
+            }
+
             if (n_dims < 1 || n_dims > 2) {
                 throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims));
             }
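For context, here is a minimal standalone C++ sketch of the pattern this hunk switches to: copying the tensor shape element by element into a resizable vector and rejecting tensors that are not 1- or 2-dimensional. The struct and function names (src_tensor, dst_tensor, copy_shape) are hypothetical stand-ins, not the actual libllama types.

    // Hypothetical sketch only; names do not match the real libllama code.
    #include <cstdint>
    #include <cstdio>
    #include <stdexcept>
    #include <vector>

    struct src_tensor {              // stand-in for the ggml-side tensor being read
        uint32_t n_dims;
        uint32_t ne[4];
    };

    struct dst_tensor {              // stand-in for the loader-side tensor record
        std::vector<uint32_t> ne;
    };

    static void copy_shape(dst_tensor & tensor, const src_tensor * cur, const char * name) {
        const uint32_t n_dims = cur->n_dims;

        // copy each dimension explicitly instead of a raw memcpy; this stays
        // correct even if the source and destination element widths differ
        tensor.ne.resize(n_dims);
        for (uint32_t j = 0; j < n_dims; ++j) {
            tensor.ne[j] = cur->ne[j];
        }

        if (n_dims < 1 || n_dims > 2) {
            char msg[256];
            snprintf(msg, sizeof(msg),
                     "llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims);
            throw std::runtime_error(msg);
        }
    }

One likely motivation for the per-element loop over the earlier memcpy is robustness to differing dimension types: if the source shape were stored with a wider integer type than the destination vector's element type, a byte-wise copy would silently misread the shape, while the loop converts each value individually.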