mirror of https://github.com/ggerganov/llama.cpp.git
commit 6383bbfa5f
parent 9eda98d14b

    fix
@@ -2019,7 +2019,7 @@ int llama_apply_lora_from_file_internal(struct llama_context * ctx, const char *
             // base indim = loraA transposed indim, base outdim = loraB outdim
             if (base_t->ne[0] != loraA->ne[1] || base_t->ne[1] != loraB->ne[1]) {
-                fprintf(stderr, "%s: incompatible tensor dimensions (outdims: %" PRId64 ", %" PRId64 ", indims: %" PRId64 ", %" PRId64 ");"
+                fprintf(stderr, "%s: incompatible tensor dimensions (outdim: %" PRId64 ", %" PRId64 ", indim: %" PRId64 ", %" PRId64 ");"
                                 " are you sure that this adapter is for this model?\n", __func__, base_t->ne[1], loraB->ne[1], base_t->ne[0], loraA->ne[1]);
                 return 1;
             }
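For context on the check being touched here: in LoRA the weight update is dW = B * A, with B of shape (outdim x r) and A of shape (r x indim), so dW matches the base weight W of shape (outdim x indim). ggml keeps the input dimension in ne[0] and the output dimension in ne[1], and loraA is stored transposed, which is why the code compares base_t->ne[0] against loraA->ne[1]. Below is a minimal standalone sketch of the same compatibility test; the struct dims type and lora_shapes_compatible helper are hypothetical illustrations, not part of ggml.

#include <stdio.h>

/* Hypothetical standalone sketch of the shape check in the diff above.
 * LoRA update: dW = B * A, B: (outdim x r), A: (r x indim),
 * so dW has the same shape as the base weight W: (outdim x indim).
 * Assumed layout, mirroring the diff: ne[0] = input dim, ne[1] = output
 * dim; loraA is stored transposed, so its ne[1] holds the input dim. */
struct dims { long long ne[2]; };

static int lora_shapes_compatible(struct dims base,
                                  struct dims loraA,   /* stored transposed */
                                  struct dims loraB) {
    /* base indim must equal loraA's (transposed) indim,
       base outdim must equal loraB's outdim */
    return base.ne[0] == loraA.ne[1] && base.ne[1] == loraB.ne[1];
}

int main(void) {
    struct dims base  = { { 4096, 4096 } };  /* indim = outdim = 4096 */
    struct dims loraA = { { 16, 4096 } };    /* r = 16, indim  = 4096 */
    struct dims loraB = { { 16, 4096 } };    /* r = 16, outdim = 4096 */
    printf("compatible: %d\n", lora_shapes_compatible(base, loraA, loraB));
    return 0;
}

With a 4096 x 4096 base weight and rank-16 adapters the check passes; an adapter exported for a model with different embedding sizes would fail it and hit the error path reworded by this commit.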