Vulkan Improvements (#5835)
* Improve dequant shaders, add fast q4_0 dequant (a CPU reference sketch of the q4_0 format follows after this message)
* Optimize dmmv non-kquants for GCN; remove unnecessary SPIR-V shader duplication
* Fix q4_0 dequant dispatch sizes; fix backend free bug
* Optimize dequant shaders for q4_1, q5_0, q5_1 and q8_0
* Add unary and binary op shader templates
* Fix Vulkan check results
* Enable non-contiguous support for simple ops
* Add argsort; add basic q4_0 mmq shader and unit test
* Speed up q4_0 dequant code, enable mmq for q4_0
* Rework matmul pipeline selection
* Add soft_max alibi support
* Add q4_1, q5_0, q5_1 and q8_0 dequant mat mat mul shaders
* Add environment variable GGML_VK_FORCE_MAX_ALLOCATION_SIZE to limit max buffer size (a usage sketch follows further below); rename GGML_VULKAN_DISABLE_F16 to GGML_VK_DISABLE_F16 for consistency
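For reference, the q4_0 format those shaders decode stores 32 weights per block: one scale (fp16 in ggml) plus two 4-bit values packed per byte, each re-centered by subtracting 8. A CPU sketch of the dequantization, with the scale shown as a plain float so the snippet stays self-contained; the commit's shaders perform the equivalent math in SPIR-V:

#include <stdint.h>

#define QK4_0 32

typedef struct {
    float   d;              // per-block scale (stored as fp16 in the real format)
    uint8_t qs[QK4_0 / 2];  // 32 weights packed as 4-bit values, two per byte
} block_q4_0_sketch;

// Expand one block into 32 floats: y[j] from the low nibbles, y[j+16] from the high ones.
static void dequantize_q4_0(const block_q4_0_sketch * b, float * y) {
    for (int j = 0; j < QK4_0 / 2; ++j) {
        const int x0 = (b->qs[j] & 0x0F) - 8; // low nibble, re-centered around zero
        const int x1 = (b->qs[j] >>   4) - 8; // high nibble, re-centered around zero
        y[j]             = x0 * b->d;
        y[j + QK4_0 / 2] = x1 * b->d;
    }
}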
parent 21b0867433
commit 61d1c88e15
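GGML_VK_FORCE_MAX_ALLOCATION_SIZE caps the size of a single Vulkan buffer allocation. A minimal sketch of how such an override can be read at startup; the helper name, default handling, and bytes interpretation are illustrative, not taken from this commit:

#include <cstdint>
#include <cstdlib>

// Hypothetical helper: returns the configured cap in bytes, or `device_max`
// (e.g. the driver-reported maxMemoryAllocationSize) when the variable is unset.
static uint64_t vk_max_allocation_size(uint64_t device_max) {
    const char * env = std::getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");
    if (env == nullptr || *env == '\0') {
        return device_max;
    }
    return std::strtoull(env, nullptr, 10); // value interpreted as bytes
}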
ggml-vulkan-shaders.hpp (86931 changed lines): file diff suppressed because it is too large
ggml-vulkan.cpp (2128 changed lines): file diff suppressed because it is too large
ggml-vulkan.h
@@ -10,6 +10,7 @@ extern "C" {
 #define GGML_VK_NAME "Vulkan"
 #define GGML_VK_MAX_DEVICES 16
 
+GGML_API void ggml_vk_instance_init(void);
 GGML_API void ggml_vk_init_cpu_assist(void);
 
 GGML_API void ggml_vk_preallocate_buffers_graph_cpu_assist(struct ggml_tensor * node);
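The header change exports ggml_vk_instance_init, so a host program can bring up the shared Vulkan instance before the rest of the backend. A hedged sketch of the intended call order; whether an explicit call is required depends on how the backend is initialized elsewhere:

#include "ggml-vulkan.h"

int main(void) {
    // Create the shared Vulkan instance first; device selection and buffer
    // allocation in the backend build on top of it.
    ggml_vk_instance_init();

    // ... set up a ggml Vulkan backend and run graphs here ...
    return 0;
}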
File diff suppressed because it is too large
llama.cpp
@@ -5014,8 +5014,8 @@ static struct ggml_tensor * llm_build_kqv(
         ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
     }
 
-#if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE)
-#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, and Kompute")
+#if defined(GGML_USE_KOMPUTE)
+#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Kompute")
 #pragma message("      Falling back to ggml_alibi(). Will become an error in Mar 2024")
 #pragma message("ref:  https://github.com/ggerganov/llama.cpp/pull/5488")
     if (hparams.f_max_alibi_bias > 0.0f) {
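With ALiBi now handled inside the Vulkan soft_max path (the "Add soft_max alibi support" item above), only Kompute still takes the ggml_alibi() fallback. A simplified, non-runnable sketch of the control flow around this hunk, reconstructed from the shown context with callback plumbing elided; the ggml_soft_max_ext signature follows PR #5488:

#if defined(GGML_USE_KOMPUTE)
    if (hparams.f_max_alibi_bias > 0.0f) {
        // legacy path: scale, apply ALiBi as a separate op, mask, plain soft_max
        kq = ggml_scale(ctx, kq, kq_scale);
        kq = ggml_alibi(ctx, kq, /*n_past=*/0, n_head, hparams.f_max_alibi_bias);
        kq = ggml_add(ctx, kq, kq_mask);
        kq = ggml_soft_max(ctx, kq);
    } else
#endif
    {
        // fused path: mask, positions, scaling and ALiBi handled in one op
        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_pos, kq_scale, hparams.f_max_alibi_bias);
    }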