Mirror of https://github.com/ggerganov/llama.cpp.git, last synced 2024-12-25 19:04:35 +00:00.
CUDA: mul_mat_q=true llama_context_params default (#2912)
This commit is contained in:
parent
71d6975559
commit
8afe228000
@@ -5287,7 +5287,7 @@ struct llama_context_params llama_context_default_params() {
         /*.progress_callback           =*/ nullptr,
         /*.progress_callback_user_data =*/ nullptr,
         /*.low_vram                    =*/ false,
-        /*.mul_mat_q                   =*/ false,
+        /*.mul_mat_q                   =*/ true,
         /*.f16_kv                      =*/ true,
         /*.logits_all                  =*/ false,
         /*.vocab_only                  =*/ false,
Loading…
Reference in New Issue
Block a user