Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25 10:54:36 +00:00)
llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)
as done in https://github.com/ggerganov/llama.cpp/pull/3827
parent c41ea36eaa
commit 3d48f42efc
@@ -5168,7 +5168,8 @@ static int llama_decode_internal(
             model.arch == LLM_ARCH_BAICHUAN ||
             model.arch == LLM_ARCH_FALCON   ||
             model.arch == LLM_ARCH_REFACT   ||
-            model.arch == LLM_ARCH_MPT;
+            model.arch == LLM_ARCH_MPT      ||
+            model.arch == LLM_ARCH_STARCODER;
 
         const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
         if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
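
For readability, here is a minimal sketch of how the condition reads once the hunk above is applied. This is a reconstruction, not part of the commit: the LLM_ARCH_LLAMA entry and the n_threads = 1 body are assumed from the surrounding upstream code outside the hunk context.

    // Sketch of the check in llama_decode_internal() after this commit.
    // Lines marked "assumed" come from surrounding upstream code, not from this diff.
    const bool full_offload_supported =
        model.arch == LLM_ARCH_LLAMA    ||   // assumed: sits above the hunk context
        model.arch == LLM_ARCH_BAICHUAN ||
        model.arch == LLM_ARCH_FALCON   ||
        model.arch == LLM_ARCH_REFACT   ||
        model.arch == LLM_ARCH_MPT      ||
        model.arch == LLM_ARCH_STARCODER;    // added by this commit

    // Fully offloaded when all repeating layers plus the extra non-repeating ones fit on the GPU.
    const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;

    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
        n_threads = 1; // assumed body: extra CPU threads only add overhead when the GPU does all the work
    }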