mirror of https://github.com/ggerganov/llama.cpp.git, synced 2025-01-12 03:31:46 +00:00
llama : remove check flash_attn with lora (#11104)
This commit is contained in:
  parent 96a1dc27c3
  commit 09186fabbe
```diff
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
```
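With the check removed, `llama_lora_adapter_set` no longer rejects contexts created with `flash_attn` enabled; it simply records the adapter's scale. Below is a minimal usage sketch against the C API of this era (`llama_lora_adapter_init` / `llama_lora_adapter_set`); the model and adapter paths are placeholders, not files shipped with the project:

```c
#include "llama.h"
#include <stdio.h>

int main(void) {
    // Placeholder paths for illustration only.
    const char * model_path = "model.gguf";
    const char * lora_path  = "adapter.gguf";

    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file(model_path, mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // After this commit, enabling flash attention no longer blocks LoRA.
    struct llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true;

    struct llama_context * ctx = llama_new_context_with_model(model, cparams);

    // Load the adapter once per model, then attach it to the context with a
    // scale; llama_lora_adapter_set() now just stores the (adapter, scale) pair.
    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, lora_path);
    if (adapter == NULL || llama_lora_adapter_set(ctx, adapter, 1.0f) != 0) {
        fprintf(stderr, "failed to apply LoRA adapter\n");
        return 1;
    }

    // ... run inference ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```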
|