Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-11-11 21:39:52 +00:00
097e121e2f

llama : add benchmark example

* add to examples CMakeLists.txt
* fix MSVC build
* add missing include
* add Bessel's correction to stdev calculation
* improve markdown formatting
* add missing include
* print warning if NDEBUG is not defined
* remove n_prompt and n_gen from the matrix, use each value separately instead
* better checks for non-optimized builds
* llama.cpp : fix MEM_REQ_SCRATCH0 reusing the value of n_ctx of the first call
* fix JSON formatting
* add SQL output
* add basic CPU and GPU info (Linux/CUDA only)
* markdown: also show values that differ from the default
* markdown: add build id
* cleanup and improve formatting

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
34 lines · 1.3 KiB · C
// ggml-cuda.h: CUDA backend interface for ggml

#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

#define GGML_CUDA_MAX_DEVICES 16

// one-time initialization of the CUDA/cuBLAS backend
GGML_API void ggml_init_cublas(void);

// pinned (page-locked) host memory, for faster host<->device transfers
GGML_API void * ggml_cuda_host_malloc(size_t size);
GGML_API void ggml_cuda_host_free(void * ptr);

// returns true if the matrix multiplication src0 * src1 -> dst can be offloaded to CUDA
GGML_API bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);

// proportions of work to distribute across multiple GPUs (one entry per device)
GGML_API void ggml_cuda_set_tensor_split(const float * tensor_split);

// upload tensor data to device memory
GGML_API void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor);

// free a tensor's device-side data
GGML_API void ggml_cuda_free_data(struct ggml_tensor * tensor);

// place a tensor's buffers on the GPU: via the scratch buffer, bypassing it, or forcing in-place reuse
GGML_API void ggml_cuda_assign_buffers(struct ggml_tensor * tensor);
GGML_API void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
GGML_API void ggml_cuda_assign_buffers_force_inplace(struct ggml_tensor * tensor);

// select the primary GPU
GGML_API void ggml_cuda_set_main_device(int main_device);

// toggle the custom quantized matrix multiplication kernels
GGML_API void ggml_cuda_set_mul_mat_q(bool mul_mat_q);

// scratch buffer used for intermediate results
GGML_API void ggml_cuda_set_scratch_size(size_t scratch_size);
GGML_API void ggml_cuda_free_scratch(void);

// run an op on the GPU; returns true if it was handled there
GGML_API bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);

GGML_API int  ggml_cuda_get_device_count(void);
GGML_API void ggml_cuda_get_device_description(int device, char * description, size_t description_size);

#ifdef __cplusplus
}
#endif
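To orient a reader new to this interface, below is a minimal usage sketch in C. The call order (initialize, enumerate devices, allocate pinned staging memory) is an assumption drawn from the declarations above, not something the header mandates; the 16 MiB staging size is arbitrary, and the sketch assumes it is compiled inside the llama.cpp tree with the cuBLAS build enabled.

// Minimal usage sketch for the CUDA interface declared above.
// Illustrative only: the call order is assumed, not prescribed by the header.
#include <stdio.h>

#include "ggml.h"
#include "ggml-cuda.h"

int main(void) {
    // one-time backend initialization
    ggml_init_cublas();

    // enumerate devices and pick the first one as the main device
    const int n_devices = ggml_cuda_get_device_count();
    for (int i = 0; i < n_devices; i++) {
        char desc[128];
        ggml_cuda_get_device_description(i, desc, sizeof(desc));
        printf("device %d: %s\n", i, desc);
    }
    ggml_cuda_set_main_device(0);

    // pinned (page-locked) host memory speeds up host<->device copies
    void * staging = ggml_cuda_host_malloc(16 * 1024 * 1024);
    if (staging == NULL) {
        fprintf(stderr, "pinned allocation failed\n");
        return 1;
    }

    // ... upload tensors with ggml_cuda_transform_tensor(), run graphs, ...

    ggml_cuda_host_free(staging);
    ggml_cuda_free_scratch();
    return 0;
}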