mirror of https://github.com/ggerganov/llama.cpp.git
metal : try to improve batched decoding
commit 3cb1c348b3
parent 3e68df8616
@@ -1083,7 +1083,7 @@ void ggml_metal_graph_compute(
                 // find the break-even point where the matrix-matrix kernel becomes more efficient compared
                 // to the matrix-vector kernel
-                int ne11_mm_min = 1;
+                int ne11_mm_min = src0t == GGML_TYPE_F16 ? 1 : 16;
 
 #if 0
                 // the numbers below are measured on M2 Ultra for 7B and 13B models
 
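For context, the change keeps the threshold at 1 for F16 weights but raises it to 16 for other (quantized) source types, so batched decoding with quantized weights keeps using the matrix-vector kernel until the batch dimension ne11 grows larger. Below is a minimal sketch of how such a per-type threshold could gate the kernel choice; the enum, the use_mm_kernel helper, and the exact comparison are illustrative assumptions, not the actual dispatch logic in ggml-metal.m.

// Sketch only: a per-type threshold like ne11_mm_min deciding between the
// matrix-vector and matrix-matrix kernels based on the batch dimension ne11.
// Type names and the helper are hypothetical, not ggml API.
#include <stdbool.h>
#include <stdio.h>

typedef enum { SRC_F16, SRC_QUANTIZED } src0_type;

// Returns true when the matrix-matrix kernel is expected to win.
// F16 weights: mat-mat is already competitive at very small batches.
// Quantized weights: mat-vec stays faster until the batch grows past 16.
static bool use_mm_kernel(src0_type src0t, int ne11) {
    const int ne11_mm_min = src0t == SRC_F16 ? 1 : 16;
    return ne11 > ne11_mm_min;
}

int main(void) {
    printf("F16,       ne11 = 2  -> %s\n", use_mm_kernel(SRC_F16,       2)  ? "mat-mat" : "mat-vec");
    printf("quantized, ne11 = 8  -> %s\n", use_mm_kernel(SRC_QUANTIZED, 8)  ? "mat-mat" : "mat-vec");
    printf("quantized, ne11 = 32 -> %s\n", use_mm_kernel(SRC_QUANTIZED, 32) ? "mat-mat" : "mat-vec");
    return 0;
}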