Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-27 03:44:35 +00:00
295f85654a
renamed ggml_backend functions
changed ggml_buffer and ggml_backend to always be used as pointers
rename ggml_tensor::params -> op_params
22 lines
401 B
C
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

// pinned (page-locked) host memory: allocate/free, or pin/unpin an existing buffer
GGML_API void * ggml_cuda_host_malloc(size_t size);
GGML_API void ggml_cuda_host_free(void * ptr);
GGML_API void ggml_cuda_host_register(void * ptr, size_t size);
GGML_API void ggml_cuda_host_unregister(void * ptr);

// backend API

GGML_API struct ggml_backend * ggml_backend_cuda_init();

#ifdef __cplusplus
}
#endif
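A minimal usage sketch of the declarations above, assuming the header is included as "ggml-cuda.h" (the file name is not shown in this excerpt) and that graph construction/backend teardown live in other headers and are therefore omitted. It is not the project's own example, just an illustration of how the pinned-memory helpers and the backend constructor fit together.

#include <stdio.h>
#include <stdlib.h>
#include "ggml-cuda.h" // assumed file name for the declarations above

int main(void) {
    const size_t n_bytes = 16u * 1024u * 1024u;

    // Allocate pinned (page-locked) host memory for faster host<->device copies.
    void * host_buf = ggml_cuda_host_malloc(n_bytes);
    if (host_buf == NULL) {
        fprintf(stderr, "pinned host allocation failed\n");
        return 1;
    }

    // Alternatively, pin an existing allocation in place.
    void * plain_buf = malloc(n_bytes);
    if (plain_buf != NULL) {
        ggml_cuda_host_register(plain_buf, n_bytes);
    }

    // Create the CUDA backend instance exposed by this header.
    struct ggml_backend * backend = ggml_backend_cuda_init();
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize the CUDA backend\n");
    }

    // ... build and run a ggml graph on the backend (not part of this header) ...

    // Unpin before freeing a registered buffer; free pinned allocations with
    // the matching ggml_cuda_host_free().
    if (plain_buf != NULL) {
        ggml_cuda_host_unregister(plain_buf);
        free(plain_buf);
    }
    ggml_cuda_host_free(host_buf);
    return 0;
}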