// Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-28 12:24:35 +00:00)
// Commit 0d2b66c638: "refactor ggml-cuda"
// File stats: 20 lines, 251 B, language: C
#pragma once
#include "ggml.h"
#ifdef __cplusplus
extern "C" {
#endif
// Allocate a host-side buffer of `size` bytes for use with the CUDA backend.
// NOTE(review): presumably wraps cudaMallocHost (page-locked/pinned memory for
// fast async H2D/D2H copies) — confirm against the definition in ggml-cuda.cu.
// Free the returned pointer with ggml_cuda_host_free(), not free().
void * ggml_cuda_host_malloc(size_t size);
// Release a buffer previously obtained from ggml_cuda_host_malloc().
// NOTE(review): presumably wraps cudaFreeHost — confirm in ggml-cuda.cu.
// Behavior for ptr == NULL is not visible here; verify before relying on it.
void ggml_cuda_host_free(void * ptr);
// backend API
// Initialize the CUDA backend and return its handle (a ggml_backend struct,
// returned by value; declared in ggml.h / the backend headers).
// Declared with (void): in C, empty parentheses `()` mean "unspecified
// arguments" (no prototype), so C callers would get no argument checking.
// `(void)` is the correct zero-argument prototype and is identical in C++,
// so this change is fully backward-compatible for all callers.
struct ggml_backend ggml_backend_cuda_init(void);
#ifdef __cplusplus
}
#endif