Mirror of https://github.com/ggerganov/llama.cpp.git (last synced 2024-12-25 10:54:36 +00:00).
Add nvidia and amd backends (#6157)
This commit is contained in:
parent
42e21c6882
commit
c5b8595e3f
@ -977,8 +977,10 @@ namespace dpct
|
||||
// Map a SYCL backend/device selector string (e.g. from ONEAPI_DEVICE_SELECTOR)
// to a small integer index used internally to identify the backend.
//
// Returns:
//   0 - Level Zero GPU, 1 - OpenCL GPU, 2 - CUDA GPU, 3 - HIP GPU,
//   4 - OpenCL CPU,     5 - OpenCL accelerator.
// Aborts via GGML_ASSERT on any unrecognized backend string.
//
// NOTE(review): the scraped diff text contained stale pre-commit branches
// ("opencl:cpu" -> 2, "opencl:acc" -> 3) ahead of the new ones, which made
// the "return 4"/"return 5" branches unreachable; those duplicates are the
// lines the commit removed, so they are dropped here.
static int convert_backend_index(std::string & backend) {
    if (backend == "ext_oneapi_level_zero:gpu") return 0;
    if (backend == "opencl:gpu") return 1;
    if (backend == "ext_oneapi_cuda:gpu") return 2;
    if (backend == "ext_oneapi_hip:gpu") return 3;
    if (backend == "opencl:cpu") return 4;
    if (backend == "opencl:acc") return 5;
    // Unknown backend string: report it and hard-fail, matching the
    // project's GGML_ASSERT error-handling convention.
    printf("convert_backend_index: can't handle backend=%s\n", backend.c_str());
    GGML_ASSERT(false);
}
|
||||
|
Loading…
Reference in New Issue
Block a user