Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-11 19:21:46 +00:00)
llama : remove check flash_attn with lora (#11104)
This commit is contained in: parent 96a1dc27c3, commit 09186fabbe
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
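Below is a minimal caller-side sketch of what the removed check means in practice, assuming the pre-rename C API of this llama.cpp era (llama_lora_adapter_init / llama_lora_adapter_set and the flash_attn flag in llama_context_params); it is illustrative only and not part of the commit:

// Sketch (assumption: API names of this era; adjust to your build's llama.h).
#include "llama.h"

static int apply_lora(struct llama_model * model, struct llama_context * ctx,
                      const char * lora_path, float scale) {
    // Load the LoRA adapter from a GGUF file.
    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, lora_path);
    if (adapter == NULL) {
        return -1; // failed to load the adapter file
    }
    // Before this commit, the call below returned -1 whenever the context was
    // created with llama_context_params.flash_attn = true; after it, the
    // adapter is registered with the given scale regardless of flash_attn.
    return llama_lora_adapter_set(ctx, adapter, scale);
}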