Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-27 20:04:35 +00:00
Commit 43b35e38ba

* cuda sqrt support
* enable cuda in pca
* fix comments in pca
* add test
* add sqrt to ggml_backend_cuda_supports_op
* fix test
* new line
* Use F32 sqrtf instead of F64 sqrt

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
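The commit message notes that sqrt was added to ggml_backend_cuda_supports_op, the function the CUDA backend uses to report which ops it can run. A minimal sketch of that kind of gate, reduced to the two ops relevant here; the real function in ggml-cuda.cu dispatches on many more op types, so this is illustrative rather than the upstream code:

    static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
        switch (op->op) {
            // element-wise math ops handled by the kernels declared in the header below
            case GGML_OP_SQR:
            case GGML_OP_SQRT:
                return true;
            default:
                return false;
        }
        GGML_UNUSED(backend);
    }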
34 lines · 1.2 KiB · Plaintext
#include "common.cuh"

#define CUDA_GELU_BLOCK_SIZE 256
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_TANH_BLOCK_SIZE 256
#define CUDA_RELU_BLOCK_SIZE 256
#define CUDA_SIGMOID_BLOCK_SIZE 256
#define CUDA_HARDSIGMOID_BLOCK_SIZE 256
#define CUDA_HARDSWISH_BLOCK_SIZE 256
#define CUDA_SQR_BLOCK_SIZE 256
#define CUDA_SQRT_BLOCK_SIZE 256

void ggml_cuda_op_gelu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_silu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_gelu_quick(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_leaky_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_sqr(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
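The header above only declares ggml_cuda_op_sqrt; the device-side code lives in the corresponding .cu file. A minimal sketch of what the commit adds there, assuming it follows the same pattern as the other unary kernels (one thread per element, F32 sqrtf per the commit message); a sketch of the pattern, not a verbatim copy of the upstream file:

    #include "common.cuh"

    static __global__ void sqrt_f32(const float * x, float * dst, const int k) {
        const int i = blockDim.x*blockIdx.x + threadIdx.x;

        if (i >= k) {
            return;
        }
        // F32 sqrtf, as the commit message specifies, rather than F64 sqrt
        dst[i] = sqrtf(x[i]);
    }

    static void sqrt_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
        // one thread per element, CUDA_SQRT_BLOCK_SIZE (256) threads per block
        const int num_blocks = (k + CUDA_SQRT_BLOCK_SIZE - 1) / CUDA_SQRT_BLOCK_SIZE;
        sqrt_f32<<<num_blocks, CUDA_SQRT_BLOCK_SIZE, 0, stream>>>(x, dst, k);
    }

    void ggml_cuda_op_sqrt(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
        const ggml_tensor * src0 = dst->src[0];
        const float * src0_d = (const float *) src0->data;
        float * dst_d = (float *) dst->data;
        cudaStream_t stream = ctx.stream();

        // the CUDA sqrt path only handles contiguous F32 tensors
        GGML_ASSERT(src0->type == GGML_TYPE_F32);
        GGML_ASSERT( dst->type == GGML_TYPE_F32);

        sqrt_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
    }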