06b00827a0
* removed ggml_task_backend in favour of ggml_task_profile.runner and the newly added id and name fields.
* extracted the mul_mat BLAS code into ggml_compute_forward_mul_mat_blas, aligning it with CUDA/CL a bit more and making it easier to fix profiles and run tune.
* rewrote the task profile and updated/added some CUDA/CL code, finally making CL GPU offloading work.
* misc minor fixes/updates to tune; the data format was changed.
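The first bullet implies the new shape of the task profile. A minimal sketch of what the struct might now look like; only the member names runner, id, and name come from the message above, the field types and the runner signature are assumptions:

/* hypothetical layout; member names from the commit message, types assumed */
struct ggml_task_profile {
    int          id;    /* newly added: numeric identifier of the profile */
    const char * name;  /* newly added: human-readable profile name */
    /* replaces the removed ggml_task_backend */
    void (*runner)(const struct ggml_tensor * src0,
                   const struct ggml_tensor * src1,
                   struct ggml_tensor * dst);
};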
26 lines
788 B
C
#pragma once

#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

// one-time initialization of the OpenCL device, context and kernels
void ggml_cl_init(void);

// element-wise multiplication (dst = src0 * src1)
void ggml_cl_mul(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
// whether this tensor's work is offloaded to the GPU
bool ggml_cl_is_gpu_offloading(struct ggml_tensor * tensor);
// scratch-buffer size that ggml_cl_mul_mat needs for these operands
size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
// matrix multiplication of src0 and src1 into dst, using wdata as scratch space
void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);

// pinned host memory for faster host<->device transfers
void * ggml_cl_host_malloc(size_t size);
void ggml_cl_host_free(void * ptr);

// free the device-side buffer associated with a tensor
void ggml_cl_free_data(const struct ggml_tensor * tensor);

// upload tensor data to the device
void ggml_cl_transform_tensor(void * data, struct ggml_tensor * tensor);

#ifdef __cplusplus
}
#endif
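A minimal sketch of driving the matmul path declared above. Assumptions not shown on this page: the header's filename is ggml-opencl.h, src0/src1/dst are matmul-compatible tensors created elsewhere, and error handling is omitted.

#include "ggml.h"
#include "ggml-opencl.h"

void example_cl_mul_mat(const struct ggml_tensor * src0,
                        const struct ggml_tensor * src1,
                        struct ggml_tensor * dst) {
    ggml_cl_init();  /* one-time OpenCL setup */

    /* ask the backend how much scratch memory this multiplication needs */
    const size_t wsize = ggml_cl_mul_mat_get_wsize(src0, src1, dst);

    /* pinned host memory speeds up host<->device transfers */
    void * wdata = wsize > 0 ? ggml_cl_host_malloc(wsize) : NULL;

    ggml_cl_mul_mat(src0, src1, dst, wdata, wsize);

    if (wdata) {
        ggml_cl_host_free(wdata);
    }
}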