mirror of https://github.com/ggerganov/llama.cpp.git
synced 2024-12-26 03:14:35 +00:00
vulkan: properly initialize vulkan devices for LLAMA_SPLIT_MODE_NONE (#7552)
This commit is contained in:
parent 2b737caae1
commit 56411a950f
@@ -6012,6 +6012,8 @@ static ggml_backend_buffer_type_i ggml_backend_vk_buffer_type_interface = {
 };
 
 GGML_CALL ggml_backend_buffer_type_t ggml_backend_vk_buffer_type(size_t dev_num) {
+    ggml_vk_instance_init();
+
 #ifdef GGML_VULKAN_DEBUG
     std::cerr << "ggml_backend_vk_buffer_type(" << dev_num << ")" << std::endl;
 #endif
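In short: with LLAMA_SPLIT_MODE_NONE, llama.cpp asks the Vulkan backend for a specific device's buffer type directly, and that path presumably could run before the Vulkan instance and device list had been set up; the added ggml_vk_instance_init() call makes ggml_backend_vk_buffer_type self-initializing. Below is a minimal sketch of that lazy, idempotent init pattern. Only the two function names come from the diff; the guard flag, device struct, and fake enumeration are hypothetical stand-ins, not the real ggml-vulkan.cpp implementation.

#include <cstddef>
#include <iostream>
#include <vector>

struct vk_device_info { size_t index; };          // hypothetical stand-in for real device state

static bool s_instance_initialized = false;       // hypothetical guard flag
static std::vector<vk_device_info> s_devices;     // hypothetical device list

static void ggml_vk_instance_init() {
    if (s_instance_initialized) {
        return;                                   // idempotent: safe to call from any entry point
    }
    // The real backend would create the Vulkan instance and enumerate physical
    // devices here; this sketch just records two fake devices.
    s_devices = { {0}, {1} };
    s_instance_initialized = true;
}

// With the init call in place, requesting a device's buffer type works even when
// no full backend has been created yet (the LLAMA_SPLIT_MODE_NONE path).
static const vk_device_info * ggml_backend_vk_buffer_type_sketch(size_t dev_num) {
    ggml_vk_instance_init();
    return dev_num < s_devices.size() ? &s_devices[dev_num] : nullptr;
}

int main() {
    const vk_device_info * dev = ggml_backend_vk_buffer_type_sketch(0);
    std::cout << (dev ? "device 0 available" : "device 0 missing") << std::endl;
    return 0;
}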