From ff4212d20fcbc675106efb19c5278af60e18e97d Mon Sep 17 00:00:00 2001
From: Aaron Miller
Date: Wed, 4 Oct 2023 21:02:17 -0700
Subject: [PATCH] q8 mat*vec

---
 CMakeLists.txt               |  2 ++
 ggml-vulkan.cpp              | 41 +++++++++++++++++++++++
 kompute/op_mul_mat_q8_0.comp | 65 ++++++++++++++++++++++++++++++++++++
 3 files changed, 108 insertions(+)
 create mode 100644 kompute/op_mul_mat_q8_0.comp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2445d177c..c0538eb88 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -480,6 +480,7 @@ if (LLAMA_KOMPUTE)
         kompute/op_rmsnorm.comp
         kompute/op_diagmask.comp
         kompute/op_mul_mat_f16.comp
+        kompute/op_mul_mat_q8_0.comp
         kompute/op_mul_mat_q4_0.comp
         kompute/op_mul_mat_q4_1.comp
         kompute/op_mul_mat_q6_k.comp
@@ -509,6 +510,7 @@ if (LLAMA_KOMPUTE)
         shaderop_rmsnorm.h
         shaderop_diagmask.h
         shaderop_mul_mat_f16.h
+        shaderop_mul_mat_q8_0.h
         shaderop_mul_mat_q4_0.h
         shaderop_mul_mat_q4_1.h
         shaderop_mul_mat_q6_k.h
diff --git a/ggml-vulkan.cpp b/ggml-vulkan.cpp
index bf732be32..59852c649 100644
--- a/ggml-vulkan.cpp
+++ b/ggml-vulkan.cpp
@@ -23,6 +23,7 @@
 #include "shaderop_rmsnorm.h"
 #include "shaderop_diagmask.h"
 #include "shaderop_mul_mat_f16.h"
+#include "shaderop_mul_mat_q8_0.h"
 #include "shaderop_mul_mat_q4_0.h"
 #include "shaderop_mul_mat_q4_1.h"
 #include "shaderop_mul_mat_q6_k.h"
@@ -918,6 +919,43 @@ void ggml_vk_mul_mat_f16(kp::Sequence& seq,
     seq.record<kp::OpAlgoDispatch>(s_algo);
 }
 
+void ggml_vk_mul_mat_q8_0(kp::Sequence& seq,
+                          const std::shared_ptr<kp::Tensor>& inA,
+                          const std::shared_ptr<kp::Tensor>& inB,
+                          const std::shared_ptr<kp::Tensor>& out,
+                          uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+                          int32_t ne00, int32_t ne01,
+                          uint32_t nb01, uint32_t nb02,
+                          int32_t ne11, int32_t ne12,
+                          uint32_t nb11, uint32_t nb12,
+                          int32_t ne0, int32_t ne1) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q8_0_comp_spv,
+                                             kp::shader_data::op_mul_mat_q8_0_comp_spv_len);
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00;
+        uint32_t nb01, nb02;
+        uint32_t nb11, nb12;
+        int32_t ne0, ne1;
+    } pushConsts {
+        safe_divide(inAOff, 1), safe_divide(inBOff, 4), safe_divide(outOff, 4),
+        ne00, nb01, nb02, nb11, nb12, ne0, ne1,
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        const uint32_t local_x = ggml_vk_current_device().subgroupSize;
+        s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned(ne01), unsigned(ne11), unsigned(ne12)}, {local_x}, {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned(ne01), unsigned(ne11), unsigned(ne12)});
+        s_algo->setPushConstants<PushConstants>({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
 void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
                           const std::shared_ptr<kp::Tensor>& inA,
                           const std::shared_ptr<kp::Tensor>& inB,
@@ -1335,6 +1373,9 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
                         case GGML_TYPE_F32:
                             ggml_vk_mul_mat_f16(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
                             break;
+                        case GGML_TYPE_Q8_0:
+                            ggml_vk_mul_mat_q8_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
+                            break;
                         case GGML_TYPE_Q4_0:
                             ggml_vk_mul_mat_q4_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
                            break;
diff --git a/kompute/op_mul_mat_q8_0.comp b/kompute/op_mul_mat_q8_0.comp
new file mode 100644
index 000000000..2ba48127b
--- /dev/null
+++ b/kompute/op_mul_mat_q8_0.comp
@@ -0,0 +1,65 @@
+/**
+ * Copyright (c) 2023 Nomic, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the Software for Open Models License (SOM),
+ * version 1.0, as detailed in the LICENSE_SOM.txt file. A copy of this license should accompany
+ * this software. Except as expressly granted in the SOM license, all rights are reserved by Nomic, Inc.
+ */
+
+#version 450
+
+#include "common.comp"
+
+#define BLOCKS_IN_QUANT QK8_0
+#define SIZE_OF_BLOCK sizeof_block_q8_0
+#define N_ROWS 4
+
+layout(local_size_x_id = 0) in;
+layout(local_size_y = 1) in;
+layout(local_size_z = 1) in;
+
+layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
+layout (binding = 1) readonly buffer tensorInB { float inB[]; };
+layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
+
+layout (push_constant) uniform parameter {
+    uint inAOff;
+    uint inBOff;
+    uint outOff;
+    int ne00;
+    uint nb01;
+    uint nb02;
+    uint nb11;
+    uint nb12;
+    int ne0;
+    int ne1;
+} pcs;
+
+#define ELS_PER_BLOCK 32
+#define SIZE_OF_D 2
+#define BLOCK_SIZE (ELS_PER_BLOCK + SIZE_OF_D)
+
+void main() {
+    const uint r0 = gl_WorkGroupID.x;
+    const uint r1 = gl_WorkGroupID.y;
+    const uint im = gl_WorkGroupID.z;
+
+    const uint x = r0*(pcs.ne00/ELS_PER_BLOCK)*BLOCK_SIZE + im*pcs.nb02 + pcs.inAOff; // based from inA (bytes)
+    const uint y = r1*(pcs.nb11/4) + im*(pcs.nb12/4) + pcs.inBOff; // based from inB; nb1x are byte strides, inB is float
+
+    float sumf = 0.0f;
+    for (uint i = gl_SubgroupInvocationID; i < pcs.ne00; i += gl_SubgroupSize) {
+        const uint block_number = i / ELS_PER_BLOCK;
+        const uint block_offset = block_number * BLOCK_SIZE;
+        const float d = u8BufToFloat16(inA, x + block_offset);
+        const uint position_in_block = i % ELS_PER_BLOCK;
+        const int q = int8_t(inA[x+block_offset+SIZE_OF_D+position_in_block]);
+        const float dq = d * q;
+        sumf += dq * float(inB[y+i]);
+    }
+
+    const float all_sum = subgroupAdd(sumf);
+    if (subgroupElect()) {
+        out_[im*pcs.ne1*pcs.ne0 + r1*pcs.ne0 + r0 + pcs.outOff] = all_sum;
+    }
+}
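
For reference, here is the computation one workgroup of the new shader performs. A Q8_0 block packs 32 weights into 34 bytes: a float16 scale d followed by 32 signed 8-bit quants, and a weight dequantizes as d * q. The dispatch grid is {ne01, ne11, ne12}, so each workgroup owns a single output element: its subgroup lanes stride across the ne00 columns, dequantizing row r0 of A on the fly and multiplying by row r1 of B, then subgroupAdd folds the per-lane partial sums and a single lane (subgroupElect) writes the result. The C++ sketch below mirrors that arithmetic on the CPU as a validation reference; it is hypothetical, and the names q8_0_row_dot and fp16_to_fp32 are illustrative, not part of the patch or of ggml.

#include <cstdint>
#include <cstring>

// Scalar fp16 -> fp32 conversion for the 2-byte block scale `d`
// (a standalone stand-in for the shader's u8BufToFloat16).
static float fp16_to_fp32(uint16_t h) {
    const uint32_t sign = (uint32_t)(h & 0x8000) << 16;
    uint32_t exp  = (h >> 10) & 0x1F;
    uint32_t mant = h & 0x3FF;
    uint32_t bits;
    if (exp == 0) {
        if (mant == 0) {
            bits = sign;                          // +/- zero
        } else {
            int e = -1;                           // normalize a subnormal
            do { mant <<= 1; e++; } while (!(mant & 0x400));
            bits = sign | ((uint32_t)(127 - 15 - e) << 23) | ((mant & 0x3FF) << 13);
        }
    } else if (exp == 0x1F) {
        bits = sign | 0x7F800000u | (mant << 13); // inf / NaN
    } else {
        bits = sign | ((exp - 15 + 127) << 23) | (mant << 13);
    }
    float f;
    std::memcpy(&f, &bits, sizeof f);
    return f;
}

// Q8_0 block geometry, matching ELS_PER_BLOCK / SIZE_OF_D / BLOCK_SIZE
// in the shader: a 2-byte fp16 scale + 32 int8 quants = 34 bytes.
constexpr int kElsPerBlock = 32;
constexpr int kSizeOfD     = 2;
constexpr int kBlockSize   = kElsPerBlock + kSizeOfD;

// Dot product of one dequantized Q8_0 row of A with one float row of B:
// the value a single workgroup accumulates and writes to out_.
float q8_0_row_dot(const uint8_t* rowA, const float* rowB, int ne00) {
    float sumf = 0.0f;
    for (int i = 0; i < ne00; ++i) {
        const int block_number      = i / kElsPerBlock;
        const int block_offset      = block_number * kBlockSize;
        const int position_in_block = i % kElsPerBlock;
        uint16_t d_bits;
        std::memcpy(&d_bits, rowA + block_offset, sizeof d_bits);
        const float  d = fp16_to_fp32(d_bits);
        const int8_t q = (int8_t)rowA[block_offset + kSizeOfD + position_in_block];
        sumf += d * (float)q * rowB[i];           // dq * inB[y+i], as in the shader loop
    }
    return sumf;
}

For a contiguous Q8_0 matrix, row r0 starts r0 * (ne00/32) * 34 bytes into the tensor (the shader's x), so in the single-matrix case (im = 0) the shader's out_[r1*ne0 + r0] should match q8_0_row_dot(a + r0*(ne00/32)*34, b + r1*ne00, ne00) up to rounding, since the subgroup reduction sums in a different order than this serial loop.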