cuda : only use native when supported by cmake (#10389)
commit d3481e6316
parent 531cb1c233
@@ -12,7 +12,7 @@ if (CUDAToolkit_FOUND)
     # 61 == Pascal, __dp4a instruction (per-byte integer dot product)
     # 70 == V100, FP16 tensor cores
     # 75 == Turing, int8 tensor cores
-    if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6")
+    if (GGML_NATIVE AND CUDAToolkit_VERSION VERSION_GREATER_EQUAL "11.6" AND CMAKE_VERSION VERSION_GREATER_EQUAL "3.24")
         set(CMAKE_CUDA_ARCHITECTURES "native")
     elseif(GGML_CUDA_F16 OR GGML_CUDA_DMMV_F16)
         set(CMAKE_CUDA_ARCHITECTURES "60;61;70;75")
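Background: the special value "native" for CMAKE_CUDA_ARCHITECTURES, which compiles only for the GPU architectures detected on the build machine, was introduced in CMake 3.24; older CMake versions reject it at configure time, which is why this commit adds the extra version check alongside the existing CUDA toolkit check. A minimal standalone sketch of the same guard, with a hypothetical project name and an illustrative fallback list, might look like this:

    cmake_minimum_required(VERSION 3.18)
    project(cuda_arch_demo LANGUAGES CXX)   # hypothetical project name

    find_package(CUDAToolkit)

    if (CUDAToolkit_FOUND)
        # CMAKE_CUDA_ARCHITECTURES must be decided before the CUDA language
        # is enabled; "native" is only understood by CMake >= 3.24.
        if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24")
            set(CMAKE_CUDA_ARCHITECTURES "native")
        else()
            # Illustrative fallback: 61 == Pascal, 70 == V100, 75 == Turing.
            set(CMAKE_CUDA_ARCHITECTURES "61;70;75")
        endif()
        enable_language(CUDA)
    endif()

With CMake >= 3.24 the build then targets only the locally detected GPUs; on older CMake it falls back to the explicit architecture list instead of failing to configure.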