llama.cpp/ggml/include/ggml-blas.h

#pragma once
#include "ggml.h"
#include "ggml-backend.h"
#ifdef __cplusplus
extern "C" {
#endif
// backend API
GGML_BACKEND_API ggml_backend_t ggml_backend_blas_init(void);
GGML_BACKEND_API bool ggml_backend_is_blas(ggml_backend_t backend);
// number of threads used for conversion to float
// for openblas and blis, this will also set the number of threads used for blas operations
GGML_BACKEND_API void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);
GGML_BACKEND_API ggml_backend_reg_t ggml_backend_blas_reg(void);
#ifdef __cplusplus
}
#endif
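
The declarations above are the public entry points of the BLAS backend. The following is a minimal usage sketch, not part of the header itself: it assumes ggml_backend_name and ggml_backend_free from ggml-backend.h are available, and the thread count of 8 is an arbitrary value chosen for illustration.

#include <stdio.h>
#include "ggml-blas.h"

int main(void) {
    // create the BLAS backend; returns NULL on failure
    ggml_backend_t backend = ggml_backend_blas_init();
    if (backend == NULL) {
        fprintf(stderr, "failed to initialize BLAS backend\n");
        return 1;
    }

    // sanity check that this really is the BLAS backend
    if (ggml_backend_is_blas(backend)) {
        // threads used for conversion to float; with OpenBLAS/BLIS this also
        // sets the number of threads used for the BLAS operations themselves
        ggml_backend_blas_set_n_threads(backend, 8);
    }

    printf("backend: %s\n", ggml_backend_name(backend));

    ggml_backend_free(backend);
    return 0;
}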