/*
 * llama.cpp/ggml-cuda.h
 *
 * CUDA backend interface for ggml (work in progress).
 * History: slaren 0d2b66c6 "ggml backend interface wip / refactor ggml-cuda",
 * 2023-07-16.
 */
#pragma once

#include <stddef.h>  // size_t — include explicitly so the header is self-contained

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

// Allocate `size` bytes of host memory via the CUDA runtime
// (presumably pinned/page-locked for fast device transfers — TODO confirm
// against the implementation in ggml-cuda.cu). Returns NULL on failure.
// Release with ggml_cuda_host_free().
void * ggml_cuda_host_malloc(size_t size);

// Free memory previously obtained from ggml_cuda_host_malloc().
void ggml_cuda_host_free(void * ptr);

// backend API

// Initialize and return the CUDA backend object (returned by value).
// `(void)` makes this a proper C prototype; an empty parameter list would
// declare a function with unspecified arguments in C.
struct ggml_backend ggml_backend_cuda_init(void);

#ifdef __cplusplus
}
#endif