mirror of https://github.com/ggerganov/llama.cpp.git
cuda : fix LLAMA_CUDA_F16 build (#6197)

parent cfd3be76e3
commit 03a8f8fafe
@@ -9453,7 +9453,7 @@ static void ggml_cuda_op_dequantize_mul_mat_vec(
     // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
 #ifdef GGML_CUDA_F16
-    cuda_pool_alloc<half> src1_dfloat_a;
+    ggml_cuda_pool_alloc<half> src1_dfloat_a(ctx.pool());
     half * src1_dfloat = nullptr; // dfloat == half
     bool src1_convert_f16 =
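
Why the one-line change fixes the build: with GGML_CUDA_F16 defined, this path still declared the old default-constructed cuda_pool_alloc<half>, while the allocator used by the rest of the file is ggml_cuda_pool_alloc, whose constructor takes the pool to draw from (here ctx.pool()). Below is a minimal sketch of the RAII pool-allocation pattern involved; the cuda_pool and pool_alloc types are hypothetical stand-ins, not the real ggml classes, which recycle device buffers rather than calling cudaMalloc/cudaFree each time.

// Hypothetical sketch (not ggml source): an RAII pool-allocation guard
// in the spirit of ggml_cuda_pool_alloc<T>. The constructor binds the
// guard to a pool, alloc() draws from it, and the destructor gives the
// memory back, so no exit path can leak the buffer.
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstddef>
#include <cstdio>

// Stand-in for the object returned by ctx.pool() in the patch.
struct cuda_pool {
    void * alloc(size_t n) { void * p = nullptr; cudaMalloc(&p, n); return p; }
    void   free (void * p) { cudaFree(p); }
};

template <typename T>
struct pool_alloc {
    cuda_pool & pool;
    T *         ptr = nullptr;

    // The patched line is essentially this: the guard must be
    // constructed against a pool; it cannot be default-constructed.
    explicit pool_alloc(cuda_pool & p) : pool(p) {}

    T * alloc(size_t count) {
        ptr = static_cast<T *>(pool.alloc(count * sizeof(T)));
        return ptr;
    }

    ~pool_alloc() { if (ptr) { pool.free(ptr); } }
};

int main() {
    cuda_pool pool;

    // Mirrors the fixed declaration: bind to the pool up front,
    // allocate later only if the half-precision path is taken.
    pool_alloc<__half> src1_dfloat_a(pool);

    const bool src1_convert_f16 = true; // placeholder for the real condition
    __half * src1_dfloat = nullptr;     // dfloat == half
    if (src1_convert_f16) {
        src1_dfloat = src1_dfloat_a.alloc(1024);
    }

    printf("device buffer: %p\n", (void *) src1_dfloat);
    return 0; // src1_dfloat_a's destructor releases the buffer here
}

Declaring the guard before the condition is checked keeps the allocation optional: if the half-precision conversion is skipped, nothing is allocated and the destructor is a no-op.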