llama.cpp/ggml-cuda.h
slaren 295f85654a allocators wip
renamed ggml_backend functions
changed ggml_buffer and ggml_backend to always be used as pointers
rename ggml_tensor::params -> op_params
2023-07-19 02:43:44 +02:00

22 lines
401 B
C

#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

// Host-memory helpers.
// NOTE(review): names suggest these wrap CUDA pinned (page-locked) host
// allocation/registration (cudaMallocHost / cudaHostRegister) — confirm
// against the .cu implementation; only the signatures are visible here.

// Allocate `size` bytes of host memory; returns NULL-style pointer semantics
// of the underlying allocator. Pair with ggml_cuda_host_free().
GGML_API void * ggml_cuda_host_malloc(size_t size);

// Free memory previously obtained from ggml_cuda_host_malloc().
GGML_API void ggml_cuda_host_free(void * ptr);

// Register an existing host buffer of `size` bytes with the CUDA runtime.
// Pair with ggml_cuda_host_unregister().
GGML_API void ggml_cuda_host_register(void * ptr, size_t size);

// Undo a prior ggml_cuda_host_register() on `ptr`.
GGML_API void ggml_cuda_host_unregister(void * ptr);

// backend API

// Create the CUDA backend instance. Caller owns the returned backend.
// `(void)` makes this a true prototype: an empty list `()` in C declares a
// function with *unspecified* parameters and disables argument checking.
GGML_API struct ggml_backend * ggml_backend_cuda_init(void);

#ifdef __cplusplus
}
#endif