diff --git a/ggml-kompute.cpp b/ggml-kompute.cpp
index df8bcca3d..f70231bed 100644
--- a/ggml-kompute.cpp
+++ b/ggml-kompute.cpp
@@ -64,7 +64,7 @@ struct ggml_kompute_context {
 // we *have* to have the kompute manager no matter what for device discovery, but the kompute context
 // is only created when a device is set and vulkan is explicitly turned on.
 ggml_kompute_context *s_kompute_context = nullptr;
-kp::Manager *komputeManager() {
+static kp::Manager *komputeManager() {
     static kp::Manager *s_mgr = nullptr;
     if (s_mgr && !s_mgr->hasInstance()) {
         delete s_mgr;
@@ -551,7 +551,7 @@ void ggml_vk_d2h_tensor(struct ggml_kompute_context * ctx, struct ggml_tensor *
     komputeManager()->sequence()->eval({res});
 }
 
-std::vector<uint32_t> getSpirvShader(const unsigned char* rawData, size_t size) {
+static std::vector<uint32_t> getSpirvShader(const unsigned char* rawData, size_t size) {
     if (size % sizeof(uint32_t) != 0) {
         throw std::runtime_error("Invalid size: must be divisible by sizeof(uint32_t)");
     }
@@ -573,7 +573,7 @@ uint32_t safe_divide(uint32_t a, uint32_t b) {
     return a / b;
 }
 
-void ggml_vk_add(
+static void ggml_vk_add(
     kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
@@ -621,7 +621,7 @@ void ggml_vk_add(
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_addrow(kp::Sequence& seq,
+static void ggml_vk_addrow(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -652,7 +652,7 @@ void ggml_vk_addrow(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_mul(kp::Sequence& seq,
+static void ggml_vk_mul(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -681,7 +681,7 @@ void ggml_vk_mul(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_mulrow(kp::Sequence& seq,
+static void ggml_vk_mulrow(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -712,7 +712,7 @@ void ggml_vk_mulrow(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_scale(kp::Sequence& seq,
+static void ggml_vk_scale(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& in,
     const std::shared_ptr<kp::Tensor>& out,
     uint32_t inOff, uint32_t outOff,
@@ -753,7 +753,7 @@ void ggml_vk_scale(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_xxlu(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
+static void ggml_vk_xxlu(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& in,
     const std::shared_ptr<kp::Tensor>& out,
     uint32_t inOff, uint32_t outOff,
@@ -778,7 +778,7 @@ void ggml_vk_xxlu(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
 }
 
 template <typename... Args>
-void ggml_vk_silu(Args&&... args) {
+static void ggml_vk_silu(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_silu_comp_spv,
         kp::shader_data::op_silu_comp_spv_len);
 
@@ -786,7 +786,7 @@ void ggml_vk_silu(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_relu(Args&&... args) {
+static void ggml_vk_relu(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_relu_comp_spv,
         kp::shader_data::op_relu_comp_spv_len);
 
@@ -794,14 +794,14 @@ void ggml_vk_relu(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_gelu(Args&&... args) {
+static void ggml_vk_gelu(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_gelu_comp_spv,
         kp::shader_data::op_gelu_comp_spv_len);
 
     ggml_vk_xxlu(spirv, std::forward<Args>(args)...);
 }
 
-void ggml_vk_soft_max(kp::Sequence& seq,
+static void ggml_vk_soft_max(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& in,
     const std::shared_ptr<kp::Tensor>& out,
     uint32_t inOff, uint32_t outOff,
@@ -833,7 +833,7 @@ void ggml_vk_soft_max(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_norm_(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
+static void ggml_vk_norm_(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& in,
     const std::shared_ptr<kp::Tensor>& out,
     uint32_t inOff, uint32_t outOff,
@@ -865,7 +865,7 @@ void ggml_vk_norm_(const std::vector<uint32_t>& spirv, kp::Sequence& seq,
 }
 
 template <typename... Args>
-void ggml_vk_norm(Args&&... args) {
+static void ggml_vk_norm(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_norm_comp_spv,
         kp::shader_data::op_norm_comp_spv_len);
 
@@ -873,14 +873,14 @@ void ggml_vk_norm(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_rms_norm(Args&&... args) {
+static void ggml_vk_rms_norm(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_rmsnorm_comp_spv,
         kp::shader_data::op_rmsnorm_comp_spv_len);
 
     ggml_vk_norm_(spirv, std::forward<Args>(args)...);
 }
 
-void ggml_vk_diag_mask_inf(kp::Sequence& seq,
+static void ggml_vk_diag_mask_inf(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& in,
     const std::shared_ptr<kp::Tensor>& out,
     uint32_t inOff, uint32_t outOff,
@@ -912,7 +912,7 @@ void ggml_vk_diag_mask_inf(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_mul_mat_f16(kp::Sequence& seq,
+static void ggml_vk_mul_mat_f16(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -951,7 +951,7 @@ void ggml_vk_mul_mat_f16(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
+static void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -989,7 +989,7 @@ void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
 }
 
 
-void ggml_vk_mul_mat_mat_f32(kp::Sequence& seq,
+static void ggml_vk_mul_mat_mat_f32(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -1039,7 +1039,7 @@ void ggml_vk_mul_mat_mat_f32(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
+static void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -1069,7 +1069,7 @@ void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
 }
 
 template <typename... Args>
-void ggml_vk_mul_mat_q4_0(Args&&... args) {
+static void ggml_vk_mul_mat_q4_0(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q4_0_comp_spv,
         kp::shader_data::op_mul_mat_q4_0_comp_spv_len);
 
@@ -1077,14 +1077,14 @@ void ggml_vk_mul_mat_q4_0(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_mul_mat_q4_1(Args&&... args) {
+static void ggml_vk_mul_mat_q4_1(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q4_1_comp_spv,
         kp::shader_data::op_mul_mat_q4_1_comp_spv_len);
 
     ggml_vk_mul_mat_q4_x(spirv, 1/*We access blocks unaligned*/, std::forward<Args>(args)...);
 }
 
-void ggml_vk_mul_mat_q6_k(kp::Sequence& seq,
+static void ggml_vk_mul_mat_q6_k(kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
     const std::shared_ptr<kp::Tensor>& out,
@@ -1116,7 +1116,7 @@ void ggml_vk_mul_mat_q6_k(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
-void ggml_vk_get_rows(const std::vector<uint32_t>& spirv,
+static void ggml_vk_get_rows(const std::vector<uint32_t>& spirv,
                       unsigned element_size, unsigned qk,
                       kp::Sequence& seq,
                       const std::shared_ptr<kp::Tensor>& inA,
@@ -1151,7 +1151,7 @@ void ggml_vk_get_rows(const std::vector<uint32_t>& spirv,
 }
 
 template <typename... Args>
-void ggml_vk_get_rows_f16(Args&&... args) {
+static void ggml_vk_get_rows_f16(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_getrows_f16_comp_spv,
         kp::shader_data::op_getrows_f16_comp_spv_len);
 
@@ -1159,7 +1159,7 @@ void ggml_vk_get_rows_f16(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_get_rows_q4_0(Args&&... args) {
+static void ggml_vk_get_rows_q4_0(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_getrows_q4_0_comp_spv,
         kp::shader_data::op_getrows_q4_0_comp_spv_len);
 
@@ -1167,7 +1167,7 @@ void ggml_vk_get_rows_q4_0(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_get_rows_q4_1(Args&&... args) {
+static void ggml_vk_get_rows_q4_1(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_getrows_q4_1_comp_spv,
         kp::shader_data::op_getrows_q4_1_comp_spv_len);
 
@@ -1175,13 +1175,13 @@ void ggml_vk_get_rows_q4_1(Args&&... args) {
 }
 
 template <typename... Args>
-void ggml_vk_get_rows_q6_k(Args&&... args) {
+static void ggml_vk_get_rows_q6_k(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_getrows_q6_k_comp_spv,
         kp::shader_data::op_getrows_q6_k_comp_spv_len);
     ggml_vk_get_rows(spirv, 1/*We access blocks unaligned*/, QK_NL, std::forward<Args>(args)...);
 }
 
-void ggml_vk_rope(
+static void ggml_vk_rope(
     kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
     const std::shared_ptr<kp::Tensor>& inB,
@@ -1249,7 +1249,7 @@ void ggml_vk_rope(
 }
 
 template <int IN_ELEMENT_SIZE, int OUT_ELEMENT_SIZE>
-void ggml_vk_cpy(const std::vector<uint32_t>& spirv,
+static void ggml_vk_cpy(const std::vector<uint32_t>& spirv,
                  kp::Sequence& seq,
                  const std::shared_ptr<kp::Tensor>& in,
                  const std::shared_ptr<kp::Tensor>& out,
@@ -1289,28 +1289,28 @@ void ggml_vk_cpy(const std::vector<uint32_t>& spirv,
 }
 
 template <typename... Args>
-void ggml_vk_cpy_f32_f16(Args&&... args) {
+static void ggml_vk_cpy_f32_f16(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_cpy_f32_f16_comp_spv,
         kp::shader_data::op_cpy_f32_f16_comp_spv_len);
     ggml_vk_cpy<4, 2>(spirv, std::forward<Args>(args)...);
 }
 
 template <typename... Args>
-void ggml_vk_cpy_f32_f32(Args&&... args) {
+static void ggml_vk_cpy_f32_f32(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_cpy_f32_f32_comp_spv,
         kp::shader_data::op_cpy_f32_f32_comp_spv_len);
     ggml_vk_cpy<4, 4>(spirv, std::forward<Args>(args)...);
 }
 
 template <typename... Args>
-void ggml_vk_cpy_f16_f16(Args&&... args) {
+static void ggml_vk_cpy_f16_f16(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_cpy_f16_f16_comp_spv,
         kp::shader_data::op_cpy_f16_f16_comp_spv_len);
     ggml_vk_cpy<2, 2>(spirv, std::forward<Args>(args)...);
 }
 
 template <typename... Args>
-void ggml_vk_cpy_f16_f32(Args&&... args) {
+static void ggml_vk_cpy_f16_f32(Args&&... args) {
     const static auto spirv = getSpirvShader(kp::shader_data::op_cpy_f16_f32_comp_spv,
         kp::shader_data::op_cpy_f16_f32_comp_spv_len);
     ggml_vk_cpy<2, 4>(spirv, std::forward<Args>(args)...);
@@ -1349,6 +1349,8 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
             case GGML_OP_TRANSPOSE:
             case GGML_OP_PERMUTE:
                 continue; // noop -> next node
+            default:
+                break;
         }
 
         const int32_t ne00 = src0 ? src0->ne[0] : 0;