diff --git a/CMakeLists.txt b/CMakeLists.txt
index c0538eb88..cf4042ea3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -479,6 +479,10 @@ if (LLAMA_KOMPUTE)
         kompute/op_norm.comp
         kompute/op_rmsnorm.comp
         kompute/op_diagmask.comp
+        kompute/op_mul_mat_mat_f16.comp
+        kompute/op_mul_mat_mat_f32.comp
+        kompute/op_mul_mat_mat_q4_0.comp
+        kompute/op_mul_mat_mat_q8_0.comp
         kompute/op_mul_mat_f16.comp
         kompute/op_mul_mat_q8_0.comp
         kompute/op_mul_mat_q4_0.comp
@@ -509,6 +513,10 @@ if (LLAMA_KOMPUTE)
         shaderop_norm.h
         shaderop_rmsnorm.h
         shaderop_diagmask.h
+        shaderop_mul_mat_mat_f16.h
+        shaderop_mul_mat_mat_f32.h
+        shaderop_mul_mat_mat_q4_0.h
+        shaderop_mul_mat_mat_q8_0.h
         shaderop_mul_mat_f16.h
         shaderop_mul_mat_q8_0.h
         shaderop_mul_mat_q4_0.h
diff --git a/ggml-vulkan.cpp b/ggml-vulkan.cpp
index 59852c649..6ae1a8fc3 100644
--- a/ggml-vulkan.cpp
+++ b/ggml-vulkan.cpp
@@ -27,6 +27,10 @@
 #include "shaderop_mul_mat_q4_0.h"
 #include "shaderop_mul_mat_q4_1.h"
 #include "shaderop_mul_mat_q6_k.h"
+#include "shaderop_mul_mat_mat_f32.h"
+#include "shaderop_mul_mat_mat_f16.h"
+#include "shaderop_mul_mat_mat_q4_0.h"
+#include "shaderop_mul_mat_mat_q8_0.h"
 #include "shaderop_getrows_f16.h"
 #include "shaderop_getrows_q4_0.h"
 #include "shaderop_getrows_q4_1.h"
@@ -938,7 +942,7 @@ void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
         uint32_t nb11, nb12;
         int32_t ne0, ne1;
     } pushConsts {
-        safe_divide(inAOff, 2), safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
         ne00, nb01, nb02, nb11, nb12, ne0, ne1,
     };
 
@@ -956,6 +960,211 @@ void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
+
+void ggml_vk_mul_mat_mat_f32(kp::Sequence& seq,
+                             const std::shared_ptr<kp::Tensor>& inA,
+                             const std::shared_ptr<kp::Tensor>& inB,
+                             const std::shared_ptr<kp::Tensor>& out,
+                             uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+                             int32_t ne00, int32_t ne01, int32_t ne02,
+                             uint32_t nb01, uint32_t nb02,
+                             int32_t ne11, int32_t ne12,
+                             uint32_t nb11, uint32_t nb12,
+                             uint32_t nb1, uint32_t nb2) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_f32_comp_spv,
+                                             kp::shader_data::op_mul_mat_mat_f32_comp_spv_len);
+
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00, ne01, ne02, ne11, ne12;
+        uint32_t nb01, nb02;
+        uint32_t nb11, nb12;
+        uint32_t nb1, nb2;
+    } pushConsts {
+        safe_divide(inAOff, 4), safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        ne00, ne01, ne02, ne11, ne12,
+        nb01, nb02, nb11, nb12,
+        nb1, nb2
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        //std::cerr << "init f32 matmat shader" << std::endl;
+        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
+                                                                   {inA, inB, out}, spirv,
+                                                                   {unsigned(ne01),
+                                                                    unsigned(ne11),
+                                                                    unsigned(std::max(ne12, ne02))},
+                                                                   {},
+                                                                   {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned(ne01),
+                              unsigned(ne11),
+                              unsigned(std::max(ne12, ne02))});
+        s_algo->setPushConstants({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    //seq.record({out});
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
+void ggml_vk_mul_mat_mat_f16(kp::Sequence& seq,
+                             const std::shared_ptr<kp::Tensor>& inA,
+                             const std::shared_ptr<kp::Tensor>& inB,
+                             const std::shared_ptr<kp::Tensor>& out,
+                             uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+                             int32_t ne00, int32_t ne01, int32_t ne02,
+                             uint32_t nb01, uint32_t nb02,
+                             int32_t ne11, int32_t ne12,
+                             uint32_t nb11, uint32_t nb12,
+                             uint32_t nb1, uint32_t nb2) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_f16_comp_spv,
+                                             kp::shader_data::op_mul_mat_mat_f16_comp_spv_len);
+
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00, ne01, ne02, ne11, ne12;
+        uint32_t nb01, nb02;
+        uint32_t nb11, nb12;
+        uint32_t nb1, nb2;
+    } pushConsts {
+        safe_divide(inAOff, 2), safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        ne00, ne01, ne02, ne11, ne12,
+        nb01, nb02, nb11, nb12,
+        nb1, nb2
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
+                                                                   {inA, inB, out}, spirv,
+                                                                   {unsigned(ne01),
+                                                                    unsigned(ne11),
+                                                                    unsigned(std::max(ne12, ne02))
+                                                                   },
+                                                                   {},
+                                                                   {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned(ne01),
+                              unsigned(ne11),
+                              unsigned(std::max(ne12, ne02)),
+                             });
+        s_algo->setPushConstants({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
+
+void ggml_vk_mul_mat_mat_q8_0(
+    kp::Sequence& seq,
+    const std::shared_ptr<kp::Tensor>& inA,
+    const std::shared_ptr<kp::Tensor>& inB,
+    const std::shared_ptr<kp::Tensor>& out,
+    uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+    int32_t ne00, int32_t ne01, int32_t ne02,
+    uint32_t nb01, uint32_t nb02,
+    int32_t ne11, int32_t ne12,
+    uint32_t nb11, uint32_t nb12,
+    uint32_t nb1, uint32_t nb2) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q8_0_comp_spv,
+                                             kp::shader_data::op_mul_mat_mat_q8_0_comp_spv_len);
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00, ne01, ne02, ne11, ne12;
+        uint32_t nb01, nb02;
+        uint32_t nb11, nb12;
+        uint32_t nb1, nb2;
+    } pushConsts {
+        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        ne00, ne01, ne02, ne11, ne12,
+        nb01, nb02, nb11, nb12,
+        nb1, nb2
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
+                                                                   {inA, inB, out}, spirv,
+                                                                   {unsigned(ne01),
+                                                                    unsigned(ne11),
+                                                                    unsigned(std::max(ne12, ne02))
+                                                                   },
+                                                                   {},
+                                                                   {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned(ne01),
+                              unsigned(ne11),
+                              unsigned(std::max(ne12, ne02)),
+                             });
+        s_algo->setPushConstants({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
+
+void ggml_vk_mul_mat_mat_q4_x(const std::vector<uint32_t>& spirv,
+                              kp::Sequence& seq,
+                              const std::shared_ptr<kp::Tensor>& inA,
+                              const std::shared_ptr<kp::Tensor>& inB,
+                              const std::shared_ptr<kp::Tensor>& out,
+                              uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+                              int32_t ne00, int32_t ne01, int32_t ne02,
+                              uint32_t nb01, uint32_t nb02,
+                              int32_t ne11, int32_t ne12,
+                              uint32_t nb11, uint32_t nb12,
+                              uint32_t nb1, uint32_t nb2) {
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00, ne01, ne02, ne11, ne12;
+        uint32_t nb01, nb02;
+        uint32_t nb11, nb12;
+        uint32_t nb1, nb2;
+    } pushConsts {
+        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        ne00, ne01, ne02, ne11, ne12,
+        nb01, nb02, nb11, nb12,
+        nb1, nb2
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
+                                                                   {inA, inB, out}, spirv,
+                                                                   {unsigned(ne01),
+                                                                    unsigned(ne11),
+                                                                    unsigned(std::max(ne12, ne02))},
+                                                                   {},
+                                                                   {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned(ne01),
+                              unsigned(ne11),
+                              unsigned(std::max(ne12, ne02)),
+                             });
+        s_algo->setPushConstants({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
+
+template <typename... Args>
+void ggml_vk_mul_mat_mat_q4_0(Args&&... args) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q4_0_comp_spv,
+                                             kp::shader_data::op_mul_mat_mat_q4_0_comp_spv_len);
+
+    ggml_vk_mul_mat_mat_q4_x(spirv, std::forward<Args>(args)...);
+}
+
 void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
                           const std::shared_ptr<kp::Tensor>& inA,
                           const std::shared_ptr<kp::Tensor>& inB,
@@ -1357,16 +1566,61 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
             case GGML_OP_MUL_MAT:
                 {
                     if (src1t != GGML_TYPE_F32) {
-                        fprintf(stderr, "%s: %s: Unsupported quantization: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
+                        fprintf(stderr, "%s: %s: Unsupported src1 type: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
                         goto not_implemented;
                     }
 
                     if (!ggml_is_transposed(src0)
                         && !ggml_is_transposed(src1)
-                        && ne00%32 == 0
-                        && ne11 > 1) {
-                        fprintf(stderr, "%s: %s: Unsupported quantization: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
-                        goto not_implemented;
+                        //&& ne00%32 == 0
+                        && ne11 > 1
+                        ) {
+                        switch (src0t) {
+                            case GGML_TYPE_F32:
+                                ggml_vk_mul_mat_mat_f32(seq,
+                                                        id_src0, id_src1, id_dst,
+                                                        off_src0, off_src1, off_dst,
+                                                        ne00, ne01, ne02,
+                                                        nb01, nb02,
+                                                        ne11, ne12,
+                                                        nb11, nb12,
+                                                        nb1, nb2);
+                                break;
+                            case GGML_TYPE_F16:
+                                ggml_vk_mul_mat_mat_f16(seq,
+                                                        id_src0, id_src1, id_dst,
+                                                        off_src0, off_src1, off_dst,
+                                                        ne00, ne01, ne02,
+                                                        nb01, nb02,
+                                                        ne11, ne12,
+                                                        nb11, nb12,
+                                                        nb1, nb2);
+                                break;
+                            case GGML_TYPE_Q4_0:
+                                ggml_vk_mul_mat_mat_q4_0(seq,
+                                                         id_src0, id_src1, id_dst,
+                                                         off_src0, off_src1, off_dst,
+                                                         ne00, ne01, ne02,
+                                                         nb01, nb02,
+                                                         ne11, ne12,
+                                                         nb11, nb12,
+                                                         nb1, nb2);
+                                break;
+                            case GGML_TYPE_Q8_0:
+                                ggml_vk_mul_mat_mat_q8_0(seq,
+                                                         id_src0, id_src1, id_dst,
+                                                         off_src0, off_src1, off_dst,
+                                                         ne00, ne01, ne02,
+                                                         nb01, nb02,
+                                                         ne11, ne12,
+                                                         nb11, nb12,
+                                                         nb1, nb2);
+                                break;
+                            default: {
+                                fprintf(stderr, "%s: %s: Unsupported quantization for M*M: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
+                                goto not_implemented;
+                            }
+                        }
                     } else {
                         switch (src0t) {
                             case GGML_TYPE_F16:
diff --git a/kompute/op_mul_mat_mat_f16.comp b/kompute/op_mul_mat_mat_f16.comp
new file mode 100644
index 000000000..b62f06d10
--- /dev/null
+++ b/kompute/op_mul_mat_mat_f16.comp
@@ -0,0 +1,56 @@
+/**
+ * Copyright (c) 2023 Nomic, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the Software for Open Models
+ * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
+ * of this license should accompany this software. Except as expressly granted
+ * in the SOM license, all rights are reserved by Nomic, Inc.
+ */
+
+#version 450
+
+#include "common.comp"
+
+#extension GL_KHR_shader_subgroup_arithmetic : require
+#extension GL_EXT_debug_printf : enable
+
+// layout(local_size_x = 8) in;
+
+layout(binding = 0) readonly buffer tensorInA { float16_t inA[]; };
+layout(binding = 1) readonly buffer tensorInB { float inB[]; };
+layout(binding = 2) writeonly buffer tensorOut { float out_[]; };
+
+layout(push_constant) uniform parameter {
+    uint inAOff;
+    uint inBOff;
+    uint outOff;
+    int ne00;
+    int ne01;
+    int ne02;
+    int ne11;
+    int ne12;
+    uint nb01;
+    uint nb02;
+    uint nb11;
+    uint nb12;
+    uint nb1;
+    uint nb2;
+}
+pcs;
+
+
+void main() {
+    uvec3 gid = gl_GlobalInvocationID;
+
+    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
+    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;
+
+    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) / 2 + pcs.inAOff; // Based from inA
+    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
+    float sum = 0.0f;
+    for (uint i = 0; i < pcs.ne00; i ++) {
+        sum += float(inA[x+i]) * float(inB[y+i]);
+    }
+
+    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
+}
\ No newline at end of file
diff --git a/kompute/op_mul_mat_mat_f32.comp b/kompute/op_mul_mat_mat_f32.comp
new file mode 100644
index 000000000..6234322ca
--- /dev/null
+++ b/kompute/op_mul_mat_mat_f32.comp
@@ -0,0 +1,53 @@
+/**
+ * Copyright (c) 2023 Nomic, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the Software for Open Models
+ * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
+ * of this license should accompany this software. Except as expressly granted
+ * in the SOM license, all rights are reserved by Nomic, Inc.
+ */
+
+#version 450
+
+#include "common.comp"
+
+#extension GL_KHR_shader_subgroup_arithmetic : require
+#extension GL_EXT_debug_printf : enable
+
+// layout(local_size_x = 8) in;
+
+layout(binding = 0) readonly buffer tensorInA { float inA[]; };
+layout(binding = 1) readonly buffer tensorInB { float inB[]; };
+layout(binding = 2) writeonly buffer tensorOut { float out_[]; };
+
+layout(push_constant) uniform parameter {
+    uint inAOff;
+    uint inBOff;
+    uint outOff;
+    int ne00;
+    int ne01;
+    int ne02;
+    int ne11;
+    int ne12;
+    uint nb01;
+    uint nb02;
+    uint nb11;
+    uint nb12;
+    uint nb1;
+    uint nb2;
+}
+pcs;
+
+
+void main() {
+    uvec3 gid = gl_GlobalInvocationID;
+
+    const uint x = (gid.x*pcs.nb01 + gid.z/(pcs.ne12/pcs.ne02)*pcs.nb02) / 4 + pcs.inAOff; // Based from inA
+    const uint y = (gid.y*pcs.nb11 + gid.z/(pcs.ne02/pcs.ne12)*pcs.nb12) / 4 + pcs.inBOff; // based from inB
+    float sum = 0.0f;
+    for (uint i = 0; i < pcs.ne00; i ++) {
+        sum += float(inA[x+i]) * float(inB[y+i]);
+    }
+
+    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
+}
diff --git a/kompute/op_mul_mat_mat_q4_0.comp b/kompute/op_mul_mat_mat_q4_0.comp
new file mode 100644
index 000000000..93dcfdaed
--- /dev/null
+++ b/kompute/op_mul_mat_mat_q4_0.comp
@@ -0,0 +1,77 @@
+/**
+ * Copyright (c) 2023 Nomic, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the Software for Open Models
+ * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
+ * of this license should accompany this software. Except as expressly granted
+ * in the SOM license, all rights are reserved by Nomic, Inc.
+ */
+
+#version 450
+
+#include "common.comp"
+
+#extension GL_KHR_shader_subgroup_arithmetic : require
+#extension GL_EXT_debug_printf : enable
+
+// layout(local_size_x = 8) in;
+
+layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
+layout(binding = 1) readonly buffer tensorInB { float inB[]; };
+layout(binding = 2) writeonly buffer tensorOut { float out_[]; };
+
+layout(push_constant) uniform parameter {
+    uint inAOff;
+    uint inBOff;
+    uint outOff;
+    int ne00;
+    int ne01;
+    int ne02;
+    int ne11;
+    int ne12;
+    uint nb01;
+    uint nb02;
+    uint nb11;
+    uint nb12;
+    uint nb1;
+    uint nb2;
+}
+pcs;
+
+#define ELS_PER_BLOCK 32
+#define QS_OFFSET 2                                  // 2-byte fp16 scale "d" stored at the start of each block
+#define BLOCK_SIZE ((ELS_PER_BLOCK / 2) + QS_OFFSET) // 18 bytes per q4_0 block
+
+void main() {
+    uvec3 gid = gl_GlobalInvocationID;
+
+    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
+    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;
+
+
+    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
+    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
+    float sum = 0.0f;
+    for (uint i = 0; i < pcs.ne00; i+=ELS_PER_BLOCK) {
+        for (uint j = 0; j < ELS_PER_BLOCK / 2; j++) {
+            const uint block_number = i / ELS_PER_BLOCK;
+            const uint block_offset = block_number * BLOCK_SIZE;
+            const float d = u8BufToFloat16(inA, x + block_offset);
+            const uint byte_position_in_block = j;
+            const int q0 = (inA[x+block_offset+QS_OFFSET+byte_position_in_block] & 0x0F) - 8;
+            const int q1 = (inA[x+block_offset+QS_OFFSET+byte_position_in_block] >> 4) - 8;
+            const float dq0 = d * q0;
+            const float dq1 = d * q1;
+            // if (gid.x == 0 && gid.y == 0 && gid.z == 0 && i < 4 && j < 4) {
+            //     debugPrintfEXT("shp=%d,%d,%d gid=%d,%d,%d i=%d, d=%f, q0=%d, q1=%d, dqs=%f,%f\n",
+            //                    pcs.ne01, pcs.ne11, pcs.ne12,
+            //                    gid.x, gid.y, gid.z, i, d, q0, q1, dq0, dq1
+            //     );
+            // }
+            sum += (dq0 * float(inB[y+i+j])) + \
+                   (dq1 * float(inB[y+i+j+(ELS_PER_BLOCK/2)]));
+        }
+    }
+
+    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
+}
\ No newline at end of file
diff --git a/kompute/op_mul_mat_mat_q8_0.comp b/kompute/op_mul_mat_mat_q8_0.comp
new file mode 100644
index 000000000..715e533e2
--- /dev/null
+++ b/kompute/op_mul_mat_mat_q8_0.comp
@@ -0,0 +1,66 @@
+/**
+ * Copyright (c) 2023 Nomic, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the Software for Open Models
+ * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
+ * of this license should accompany this software. Except as expressly granted
+ * in the SOM license, all rights are reserved by Nomic, Inc.
+ */
+
+#version 450
+
+#include "common.comp"
+
+#extension GL_KHR_shader_subgroup_arithmetic : require
+#extension GL_EXT_debug_printf : enable
+
+// layout(local_size_x = 8) in;
+
+layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
+layout(binding = 1) readonly buffer tensorInB { float inB[]; };
+layout(binding = 2) writeonly buffer tensorOut { float out_[]; };
+
+layout(push_constant) uniform parameter {
+    uint inAOff;
+    uint inBOff;
+    uint outOff;
+    int ne00;
+    int ne01;
+    int ne02;
+    int ne11;
+    int ne12;
+    uint nb01;
+    uint nb02;
+    uint nb11;
+    uint nb12;
+    uint nb1;
+    uint nb2;
+}
+pcs;
+
+#define ELS_PER_BLOCK 32
+#define QS_OFFSET 2                    // 2-byte fp16 scale "d" stored at the start of each block
+#define BLOCK_SIZE (ELS_PER_BLOCK + 2) // 34 bytes per q8_0 block
+
+void main() {
+    uvec3 gid = gl_GlobalInvocationID;
+
+    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
+    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;
+
+
+    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
+    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
+    float sum = 0.0f;
+    for (uint i = 0; i < pcs.ne00; i++) {
+        const uint block_number = i / ELS_PER_BLOCK;
+        const uint block_offset = block_number * BLOCK_SIZE;
+        const float d = u8BufToFloat16(inA, x + block_offset);
+        const uint position_in_block = i % ELS_PER_BLOCK;
+        const int q0 = int8_t(inA[x+block_offset+QS_OFFSET+position_in_block]);
+        const float dq0 = d * q0;
+        sum += (dq0 * float(inB[y+i]));
+    }
+
+    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
+}
\ No newline at end of file
diff --git a/llama.cpp b/llama.cpp
index f5e0eac81..0ff459ba5 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3855,7 +3855,7 @@ static bool llama_eval_internal(
         ggml_graph_compute_helper(lctx.work_buffer, gf, n_threads);
     }
 #elif defined(GGML_USE_KOMPUTE)
-    if (lctx.ctx_kompute && N == 1) {
+    if (lctx.ctx_kompute) { // && N == 1) {
         ggml_vk_graph_compute(lctx.ctx_kompute, gf);
         ggml_vk_d2h_tensor(lctx.ctx_kompute, res);
     } else {