mirror of https://github.com/ggerganov/llama.cpp.git
fix cuda build error
This commit is contained in:
parent 67bb367962
commit 2193ab6281
@@ -2544,7 +2544,7 @@ bool ggml_cuda_is_gpu_offloading(struct ggml_tensor * tensor) {
 bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
     ggml_cuda_func_t func;
-    const bool any_on_device = is_gpu_offloading(tensor);
+    const bool any_on_device = ggml_cuda_is_gpu_offloading(tensor);
 
     switch (tensor->op) {
         case GGML_OP_ADD:
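
For readers without the surrounding file open, the sketch below illustrates why this one-line rename fixes the CUDA build: the old call site referred to is_gpu_offloading, a name that is not declared anywhere, so compilation stopped with an undeclared-identifier error; calling the helper by its full name ggml_cuda_is_gpu_offloading resolves it. Everything beyond the two function names is an assumption made to keep the sketch self-contained: the opaque struct declarations, the placeholder stub body, and main are illustrative and are not the real ggml-cuda code.

/* Minimal, self-contained sketch of the build error this commit fixes.
 * The real types and the real body of ggml_cuda_is_gpu_offloading live in
 * ggml.h / ggml-cuda.cu and are not shown in this hunk; the stubs below are
 * placeholders for illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct ggml_tensor;          /* opaque here; defined in ggml.h */
struct ggml_compute_params;  /* opaque here; defined in ggml.h */

/* Placeholder stub; the real check is implemented in ggml-cuda.cu. */
static bool ggml_cuda_is_gpu_offloading(struct ggml_tensor * tensor) {
    (void) tensor;
    return false;
}

static bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    /* Before the fix this read is_gpu_offloading(tensor): an undeclared
     * identifier, so the CUDA build failed at compile time. Using the full
     * function name matches the declaration above and compiles cleanly. */
    const bool any_on_device = ggml_cuda_is_gpu_offloading(tensor);
    (void) params;
    return any_on_device;
}

int main(void) {
    /* NULL is fine here because the placeholder stub never dereferences it. */
    printf("any_on_device = %d\n", ggml_cuda_compute_forward(NULL, NULL));
    return 0;
}

Building the sketch as written compiles cleanly; switching the call back to is_gpu_offloading reproduces the kind of compile-time failure the commit message refers to.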