Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-29 04:44:34 +00:00)
use mat*vec shaders for mat*mat
I wrote the mat*mat shaders from scratch so that I understand them better, but they are currently not faster than simply invoking the mat*vec shaders multiple times — by a significant margin. So, except for f32, which needed a new shader, revert to the mat*vec ones here.
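For context, a minimal CPU-side sketch of the idea the message refers to: a mat*mat product can be expressed as one mat*vec application per row of the second operand. This is an illustration only, not the backend's actual dispatch code; the names and the ne00/ne01/ne11 dimension labels merely follow ggml's convention.

    // Hedged illustration: computing a mat*mat result as repeated mat*vec calls.
    #include <cstdint>

    // y[ne01] = A[ne01 x ne00] * x[ne00] -- stand-in for a single mat*vec dispatch.
    static void mat_vec(const float * A, const float * x, float * y,
                        int64_t ne00, int64_t ne01) {
        for (int64_t r = 0; r < ne01; ++r) {
            float sum = 0.0f;
            for (int64_t k = 0; k < ne00; ++k) {
                sum += A[r*ne00 + k] * x[k];
            }
            y[r] = sum;
        }
    }

    // C has ne11 rows of ne01 elements: one mat*vec invocation per row of B.
    static void mat_mat_via_mat_vec(const float * A, const float * B, float * C,
                                    int64_t ne00, int64_t ne01, int64_t ne11) {
        for (int64_t row = 0; row < ne11; ++row) {
            mat_vec(A, B + row*ne00, C + row*ne01, ne00, ne01);
        }
    }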
parent c1fd64548d
commit cc05a602d6
@@ -479,12 +479,7 @@ if (LLAMA_KOMPUTE)
    kompute/op_norm.comp
    kompute/op_rmsnorm.comp
    kompute/op_diagmask.comp
    kompute/op_mul_mat_mat_f16.comp
    kompute/op_mul_mat_mat_f32.comp
    kompute/op_mul_mat_mat_q4_0.comp
    kompute/op_mul_mat_mat_q4_1.comp
    kompute/op_mul_mat_mat_q8_0.comp
    kompute/op_mul_mat_mat_q6_k.comp
    kompute/op_mul_mat_f16.comp
    kompute/op_mul_mat_q8_0.comp
    kompute/op_mul_mat_q4_0.comp
@@ -515,12 +510,7 @@ if (LLAMA_KOMPUTE)
    shaderop_norm.h
    shaderop_rmsnorm.h
    shaderop_diagmask.h
    shaderop_mul_mat_mat_f16.h
    shaderop_mul_mat_mat_f32.h
    shaderop_mul_mat_mat_q4_0.h
    shaderop_mul_mat_mat_q4_1.h
    shaderop_mul_mat_mat_q8_0.h
    shaderop_mul_mat_mat_q6_k.h
    shaderop_mul_mat_f16.h
    shaderop_mul_mat_q8_0.h
    shaderop_mul_mat_q4_0.h
ggml-vulkan.cpp — 330 changed lines
@@ -28,11 +28,6 @@
#include "shaderop_mul_mat_q4_1.h"
#include "shaderop_mul_mat_q6_k.h"
#include "shaderop_mul_mat_mat_f32.h"
#include "shaderop_mul_mat_mat_f16.h"
#include "shaderop_mul_mat_mat_q4_0.h"
#include "shaderop_mul_mat_mat_q4_1.h"
#include "shaderop_mul_mat_mat_q8_0.h"
#include "shaderop_mul_mat_mat_q6_k.h"
#include "shaderop_getrows_f16.h"
#include "shaderop_getrows_q4_0.h"
#include "shaderop_getrows_q4_1.h"
@@ -1013,219 +1008,6 @@ void ggml_vk_mul_mat_mat_f32(kp::Sequence& seq,
    seq.record<kp::OpAlgoDispatch>(s_algo);
}

void ggml_vk_mul_mat_mat_f16(kp::Sequence& seq,
                             const std::shared_ptr<kp::Tensor>& inA,
                             const std::shared_ptr<kp::Tensor>& inB,
                             const std::shared_ptr<kp::Tensor>& out,
                             uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
                             int32_t ne00, int32_t ne01, int32_t ne02,
                             uint32_t nb01, uint32_t nb02,
                             int32_t ne11, int32_t ne12,
                             uint32_t nb11, uint32_t nb12,
                             uint32_t nb1, uint32_t nb2) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_f16_comp_spv,
                                             kp::shader_data::op_mul_mat_mat_f16_comp_spv_len);

    struct PushConstants {
        uint32_t inAOff, inBOff, outOff;
        int32_t ne00, ne01, ne02, ne11, ne12;
        uint32_t nb01, nb02;
        uint32_t nb11, nb12;
        uint32_t nb1, nb2;
    } pushConsts {
        safe_divide(inAOff, 2), safe_divide(inBOff, 4), safe_divide(outOff, 4),
        ne00, ne01, ne02, ne11, ne12,
        nb01, nb02, nb11, nb12,
        nb1, nb2
    };

    const uint32_t local_x = ggml_vk_current_device().subgroupSize;
    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
    if (!komputeManager()->hasAlgorithm(__func__)) {
        s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(),
            {inA, inB, out}, spirv,
            {unsigned(ne01),
             unsigned(ne11),
             unsigned(std::max(ne12, ne02))
            },
            {local_x},
            {pushConsts});
    } else {
        s_algo = komputeManager()->getAlgorithm(__func__);
        s_algo->setTensors({inA, inB, out});
        s_algo->setWorkgroup({unsigned(ne01),
                              unsigned(ne11),
                              unsigned(std::max(ne12, ne02)),
                             });
        s_algo->setPushConstants<PushConstants>({pushConsts});
        s_algo->updateDescriptors(s_kompute_context->pool.get());
    }
    seq.record<kp::OpAlgoDispatch>(s_algo);
}


void ggml_vk_mul_mat_mat_q8_0(
    kp::Sequence& seq,
    const std::shared_ptr<kp::Tensor>& inA,
    const std::shared_ptr<kp::Tensor>& inB,
    const std::shared_ptr<kp::Tensor>& out,
    uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
    int32_t ne00, int32_t ne01, int32_t ne02,
    uint32_t nb01, uint32_t nb02,
    int32_t ne11, int32_t ne12,
    uint32_t nb11, uint32_t nb12,
    uint32_t nb1, uint32_t nb2) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q8_0_comp_spv,
                                             kp::shader_data::op_mul_mat_mat_q8_0_comp_spv_len);
    struct PushConstants {
        uint32_t inAOff, inBOff, outOff;
        int32_t ne00, ne01, ne02, ne11, ne12;
        uint32_t nb01, nb02;
        uint32_t nb11, nb12;
        uint32_t nb1, nb2;
    } pushConsts {
        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
        ne00, ne01, ne02, ne11, ne12,
        nb01, nb02, nb11, nb12,
        nb1, nb2
    };

    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
    if (!komputeManager()->hasAlgorithm(__func__)) {
        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
            {inA, inB, out}, spirv,
            {unsigned(ne01),
             unsigned(ne11),
             unsigned(std::max(ne12, ne02))
            },
            {},
            {pushConsts});
    } else {
        s_algo = komputeManager()->getAlgorithm(__func__);
        s_algo->setTensors({inA, inB, out});
        s_algo->setWorkgroup({unsigned(ne01),
                              unsigned(ne11),
                              unsigned(std::max(ne12, ne02)),
                             });
        s_algo->setPushConstants<PushConstants>({pushConsts});
        s_algo->updateDescriptors(s_kompute_context->pool.get());
    }
    seq.record<kp::OpAlgoDispatch>(s_algo);
}

void ggml_vk_mul_mat_mat_q6_k(
    kp::Sequence& seq,
    const std::shared_ptr<kp::Tensor>& inA,
    const std::shared_ptr<kp::Tensor>& inB,
    const std::shared_ptr<kp::Tensor>& out,
    uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
    int32_t ne00, int32_t ne01, int32_t ne02,
    uint32_t nb01, uint32_t nb02,
    int32_t ne11, int32_t ne12,
    uint32_t nb11, uint32_t nb12,
    uint32_t nb1, uint32_t nb2) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q6_k_comp_spv,
                                             kp::shader_data::op_mul_mat_mat_q6_k_comp_spv_len);
    struct PushConstants {
        uint32_t inAOff, inBOff, outOff;
        int32_t ne00, ne01, ne02, ne11, ne12;
        uint32_t nb01, nb02;
        uint32_t nb11, nb12;
        uint32_t nb1, nb2;
    } pushConsts {
        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
        ne00, ne01, ne02, ne11, ne12,
        nb01, nb02, nb11, nb12,
        nb1, nb2
    };

    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
    if (!komputeManager()->hasAlgorithm(__func__)) {
        s_algo = komputeManager()->algorithm<float, PushConstants>(__func__, s_kompute_context->pool.get(),
            {inA, inB, out}, spirv,
            {unsigned(ne01)/256,
             unsigned(ne11),
             unsigned(std::max(ne12, ne02))
            },
            {},
            {pushConsts});
    } else {
        s_algo = komputeManager()->getAlgorithm(__func__);
        s_algo->setTensors({inA, inB, out});
        s_algo->setWorkgroup({unsigned(ne01)/256,
                              unsigned(ne11),
                              unsigned(std::max(ne12, ne02)),
                             });
        s_algo->setPushConstants<PushConstants>({pushConsts});
        s_algo->updateDescriptors(s_kompute_context->pool.get());
    }
    seq.record<kp::OpAlgoDispatch>(s_algo);
}

void ggml_vk_mul_mat_mat_q4_x(const std::vector<uint32_t>& spirv,
                              kp::Sequence& seq,
                              const std::shared_ptr<kp::Tensor>& inA,
                              const std::shared_ptr<kp::Tensor>& inB,
                              const std::shared_ptr<kp::Tensor>& out,
                              uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
                              int32_t ne00, int32_t ne01, int32_t ne02,
                              uint32_t nb01, uint32_t nb02,
                              int32_t ne11, int32_t ne12,
                              uint32_t nb11, uint32_t nb12,
                              uint32_t nb1, uint32_t nb2) {
    struct PushConstants {
        uint32_t inAOff, inBOff, outOff;
        int32_t ne00, ne01, ne02, ne11, ne12;
        uint32_t nb01, nb02;
        uint32_t nb11, nb12;
        uint32_t nb1, nb2;
    } pushConsts {
        inAOff, safe_divide(inBOff, 4), safe_divide(outOff, 4),
        ne00, ne01, ne02, ne11, ne12,
        nb01, nb02, nb11, nb12,
        nb1, nb2
    };

    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
    if (!komputeManager()->hasAlgorithm(__func__)) {
        const uint32_t local_x = ggml_vk_current_device().subgroupSize;
        s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(),
            {inA, inB, out}, spirv,
            {unsigned(ne01),
             unsigned(ne11),
             unsigned(std::max(ne12, ne02))},
            {local_x, 1},
            {pushConsts});
    } else {
        s_algo = komputeManager()->getAlgorithm(__func__);
        s_algo->setTensors({inA, inB, out});
        s_algo->setWorkgroup({unsigned(ne01),
                              unsigned(ne11),
                              unsigned(std::max(ne12, ne02)),
                             });
        s_algo->setPushConstants<PushConstants>({pushConsts});
        s_algo->updateDescriptors(s_kompute_context->pool.get());
    }
    seq.record<kp::OpAlgoDispatch>(s_algo);
}


template <typename... Args>
void ggml_vk_mul_mat_mat_q4_0(Args&&... args) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q4_0_comp_spv,
                                             kp::shader_data::op_mul_mat_mat_q4_0_comp_spv_len);

    ggml_vk_mul_mat_mat_q4_x(spirv, std::forward<Args>(args)...);
}

template <typename... Args>
void ggml_vk_mul_mat_mat_q4_1(Args&&... args) {
    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_mat_q4_1_comp_spv,
                                             kp::shader_data::op_mul_mat_mat_q4_1_comp_spv_len);

    ggml_vk_mul_mat_mat_q4_x(spirv, std::forward<Args>(args)...);
}

void ggml_vk_mul_mat_q4_x(const std::vector<uint32_t>& spirv, uint32_t block_size, kp::Sequence& seq,
                          const std::shared_ptr<kp::Tensor>& inA,
                          const std::shared_ptr<kp::Tensor>& inB,
@@ -1635,54 +1417,15 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
    goto not_implemented;
}

if (!ggml_is_transposed(src0)
    && !ggml_is_transposed(src1)
    //&& ne00%32 == 0
    && ne11 > 1
    ) {
    switch (src0t) {
        case GGML_TYPE_F32:
            ggml_vk_mul_mat_mat_f32(seq,
                id_src0, id_src1, id_dst,
                off_src0, off_src1, off_dst,
                ne00, ne01, ne02,
                nb01, nb02,
                ne11, ne12,
                nb11, nb12,
                nb1, nb2);
            break;
        case GGML_TYPE_F16:
            ggml_vk_mul_mat_mat_f16(seq,
                id_src0, id_src1, id_dst,
                off_src0, off_src1, off_dst,
                ne00, ne01, ne02,
                nb01, nb02,
                ne11, ne12,
                nb11, nb12,
                nb1, nb2);
            break;
        case GGML_TYPE_Q4_0:
            ggml_vk_mul_mat_mat_q4_0(seq,
                id_src0, id_src1, id_dst,
                off_src0, off_src1, off_dst,
                ne00, ne01, ne02,
                nb01, nb02,
                ne11, ne12,
                nb11, nb12,
                nb1, nb2);
            break;
        case GGML_TYPE_Q4_1:
            ggml_vk_mul_mat_mat_q4_1(seq,
                id_src0, id_src1, id_dst,
                off_src0, off_src1, off_dst,
                ne00, ne01, ne02,
                nb01, nb02,
                ne11, ne12,
                nb11, nb12,
                nb1, nb2);
            break;
        case GGML_TYPE_Q8_0:
            ggml_vk_mul_mat_mat_q8_0(seq,
if (ggml_is_transposed(src0) ||
    ggml_is_transposed(src1)) {
    fprintf(stderr, "%s: %s: matmul on tranposed tensor not supported: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
    goto not_implemented;
}

switch (src0t) {
    case GGML_TYPE_F32:
        ggml_vk_mul_mat_mat_f32(seq,
            id_src0, id_src1, id_dst,
            off_src0, off_src1, off_dst,
            ne00, ne01, ne02,
@@ -1690,46 +1433,27 @@ void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph
            ne11, ne12,
            nb11, nb12,
            nb1, nb2);
        case GGML_TYPE_F16:
            ggml_vk_mul_mat_f16(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
            break;
        case GGML_TYPE_Q6_K:
            ggml_vk_mul_mat_mat_q6_k(seq,
                id_src0, id_src1, id_dst,
                off_src0, off_src1, off_dst,
                ne00, ne01, ne02,
                nb01, nb02,
                ne11, ne12,
                nb11, nb12,
                nb1, nb2);
            break;
        default: {
            fprintf(stderr, "%s: %s: Unsupported quantization for M*M: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
            goto not_implemented;
        }
    }
} else {
    switch (src0t) {
        case GGML_TYPE_F16:
        case GGML_TYPE_F32:
            ggml_vk_mul_mat_f16(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
            break;
        case GGML_TYPE_Q8_0:
            ggml_vk_mul_mat_q8_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
            break;
        case GGML_TYPE_Q4_0:
            ggml_vk_mul_mat_q4_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
            break;
        case GGML_TYPE_Q4_1:
            ggml_vk_mul_mat_q4_1(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
            break;
        case GGML_TYPE_Q6_K:
            ggml_vk_mul_mat_q6_k(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
            break;
        default: {
            fprintf(stderr, "%s: %s: Unsupported quantization: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
            goto not_implemented;
        }
    case GGML_TYPE_Q8_0:
        ggml_vk_mul_mat_q8_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, nb01, nb02, ne11, ne12, nb11, nb12, ne0, ne1);
        break;
    case GGML_TYPE_Q4_0:
        ggml_vk_mul_mat_q4_0(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
        break;
    case GGML_TYPE_Q4_1:
        ggml_vk_mul_mat_q4_1(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
        break;
    case GGML_TYPE_Q6_K:
        ggml_vk_mul_mat_q6_k(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne10, ne0, ne1, ne01, ne11, ne12, ne02);
        break;
    default: {
        fprintf(stderr, "%s: %s: Unsupported quantization: %u/%u\n", __func__, ggml_op_name(dst->op), src0t, src1t);
        goto not_implemented;
    }
}

} break;
case GGML_OP_GET_ROWS:
{

@@ -1,60 +0,0 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models
 * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
 * of this license should accompany this software. Except as expressly granted
 * in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#include "common.comp"

#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_EXT_debug_printf : enable

// device subgroup size
layout (local_size_x_id = 0) in;

layout(binding = 0) readonly buffer tensorInA { float16_t inA[]; };
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };

layout(push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
    int ne11;
    int ne12;
    uint nb01;
    uint nb02;
    uint nb11;
    uint nb12;
    uint nb1;
    uint nb2;
}
pcs;


void main() {
    uvec3 gid = gl_WorkGroupID;

    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;

    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) / 2 + pcs.inAOff; // Based from inA
    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
    float sum = 0.0f;
    for (uint i = gl_SubgroupInvocationID.x; i < pcs.ne00; i += gl_SubgroupSize) {
        sum += float(inA[x+i]) * float(inB[y+i]);
    }

    const float all_sum = subgroupAdd(sum);
    if (subgroupElect()) {
        out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = all_sum;
    }
}
@@ -1,77 +0,0 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models
 * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
 * of this license should accompany this software. Except as expressly granted
 * in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#include "common.comp"

#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_EXT_debug_printf : enable

layout (local_size_x_id = 0) in;
layout (local_size_y_id = 1) in;
layout (constant_id = 1) const uint nsg = 2;

layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };

layout(push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
    int ne11;
    int ne12;
    uint nb01;
    uint nb02;
    uint nb11;
    uint nb12;
    uint nb1;
    uint nb2;
}
pcs;

const uint els_per_block = 32;
const uint qs_offset = 2;
const uint block_size = (els_per_block / 2) + qs_offset;


void main() {
    uvec3 gid = gl_WorkGroupID;
    uvec3 lid = gl_LocalInvocationID;
    gid.y = gid.y * nsg + lid.y;

    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;

    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
    float sum = 0.0f;
    for (uint i = gl_SubgroupInvocationID * 2; i < pcs.ne00; i+=gl_SubgroupSize * 2) {
        const uint block_number = i / els_per_block;
        const uint block_offset = block_number * block_size;
        const float d = u8BufToFloat16(inA, x + block_offset);
        const uint j = (i % els_per_block) / 2;
        const uint byte_position_in_block = j;
        const int q0 = (inA[x+block_offset+qs_offset+byte_position_in_block] & 0x0F) - 8;
        const int q1 = (inA[x+block_offset+qs_offset+byte_position_in_block] >> 4) - 8;
        const float dq0 = d * q0;
        const float dq1 = d * q1;
        const uint block_base = block_number * els_per_block;
        sum += (dq0 * float(inB[y+block_base+j])) + \
               (dq1 * float(inB[y+block_base+j+(els_per_block/2)]));
    }

    const float all_sum = subgroupAdd(sum);
    if (subgroupElect())
        out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = all_sum;
}
@@ -1,73 +0,0 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models
 * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
 * of this license should accompany this software. Except as expressly granted
 * in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#include "common.comp"

#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_EXT_debug_printf : enable

layout(local_size_x = 32) in;

layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };

layout(push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
    int ne11;
    int ne12;
    uint nb01;
    uint nb02;
    uint nb11;
    uint nb12;
    uint nb1;
    uint nb2;
}
pcs;

#define ELS_PER_BLOCK 32
#define M_OFFSET 2
#define QS_OFFSET 4
#define BLOCK_SIZE ((ELS_PER_BLOCK / 2) + QS_OFFSET)

void main() {
    uvec3 gid = gl_GlobalInvocationID;

    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;


    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
    float sum = 0.0f;
    for (uint i = 0; i < pcs.ne00; i+=ELS_PER_BLOCK) {
        for (uint j = 0; j < ELS_PER_BLOCK / 2; j++) {
            const uint block_number = i / ELS_PER_BLOCK;
            const uint block_offset = block_number * BLOCK_SIZE;
            const float d = u8BufToFloat16(inA, x + block_offset);
            const float m = u8BufToFloat16(inA, x + block_offset + M_OFFSET);
            const uint byte_position_in_block = j;
            const int q0 = (inA[x+block_offset+QS_OFFSET+byte_position_in_block] & 0x0F);
            const int q1 = (inA[x+block_offset+QS_OFFSET+byte_position_in_block] >> 4);
            const float dq0 = (d * q0) + m;
            const float dq1 = (d * q1) + m;
            sum += (dq0 * float(inB[y+i+j])) + \
                   (dq1 * float(inB[y+i+j+(ELS_PER_BLOCK/2)]));
        }
    }

    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
}
@@ -1,88 +0,0 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models
 * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
 * of this license should accompany this software. Except as expressly granted
 * in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#include "common.comp"

#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_EXT_debug_printf : enable

layout(local_size_x = 256) in;

layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };

layout(push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
    int ne11;
    int ne12;
    uint nb01;
    uint nb02;
    uint nb11;
    uint nb12;
    uint nb1;
    uint nb2;
}
pcs;


#define ELS_PER_BLOCK 256 //QK_K
#define QH_OFFSET (ELS_PER_BLOCK / 2)
#define QSCALES_OFFSET (QH_OFFSET + (ELS_PER_BLOCK / 4))
#define SCALE_SCALE_OFFSET (QSCALES_OFFSET + (ELS_PER_BLOCK / 16))
#define BLOCK_SIZE (SCALE_SCALE_OFFSET + 2)

void main() {
    uvec3 gid = gl_GlobalInvocationID;

    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;

    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB

    float sum = 0.0f;
    const uint n_blocks = pcs.ne00 / ELS_PER_BLOCK;
    // this is pretty much all lifted right from dequantize_row_q6_K
    uint outoff = 0;
    for (uint i = 0; i < n_blocks; i++) {
        const uint block_number = i;
        const uint block_offset = block_number * BLOCK_SIZE;
        const float scales_d = u8BufToFloat16(inA, x + block_offset + SCALE_SCALE_OFFSET);
        uint qloff = block_offset;
        uint qhoff = block_offset + QH_OFFSET;
        uint scoff = block_offset + QSCALES_OFFSET;
        for (int n = 0; n < 256; n += 128) {
            for (int l = 0; l < 32; ++l) {
                int is = l/16;
                const int q1 = int((inA[x + qloff + l + 0] & 0xF) | (((inA[x + qhoff + l] >> 0) & 3) << 4)) - 32;
                const int q2 = int((inA[x + qloff + l + 32] & 0xF) | (((inA[x + qhoff + l] >> 2) & 3) << 4)) - 32;
                const int q3 = int((inA[x + qloff + l + 0] >> 4) | (((inA[x + qhoff + l] >> 4) & 3) << 4)) - 32;
                const int q4 = int((inA[x + qloff + l + 32] >> 4) | (((inA[x + qhoff + l] >> 6) & 3) << 4)) - 32;
                sum += inB[y + outoff + l + 0] * scales_d * int8_t(inA[x + scoff + is + 0]) * q1;
                sum += inB[y + outoff + l + 32] * scales_d * int8_t(inA[x + scoff + is + 2]) * q2;
                sum += inB[y + outoff + l + 64] * scales_d * int8_t(inA[x + scoff + is + 4]) * q3;
                sum += inB[y + outoff + l + 96] * scales_d * int8_t(inA[x + scoff + is + 6]) * q4;
            }
            outoff += 128;
            qloff += 64;
            qhoff += 32;
            scoff += 8;
        }
    }

    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
}
@@ -1,66 +0,0 @@
/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models
 * License (SOM), version 1.0, as detailed in the LICENSE_SOM.txt file. A copy
 * of this license should accompany this software. Except as expressly granted
 * in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#include "common.comp"

#extension GL_KHR_shader_subgroup_arithmetic : require
#extension GL_EXT_debug_printf : enable

// layout(local_size_x = 8) in;

layout(binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout(binding = 1) readonly buffer tensorInB { float inB[]; };
layout(binding = 2) writeonly buffer tensorOut { float out_[]; };

layout(push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne01;
    int ne02;
    int ne11;
    int ne12;
    uint nb01;
    uint nb02;
    uint nb11;
    uint nb12;
    uint nb1;
    uint nb2;
}
pcs;

#define ELS_PER_BLOCK 32
#define QS_OFFSET 2 // d
#define BLOCK_SIZE (ELS_PER_BLOCK + 2)

void main() {
    uvec3 gid = gl_GlobalInvocationID;

    uint bc_ab = pcs.ne12 > pcs.ne02 ? gid.z / (pcs.ne12 / pcs.ne02) : gid.z;
    uint bc_ba = pcs.ne02 > pcs.ne12 ? gid.z / (pcs.ne02 / pcs.ne12) : gid.z;


    const uint x = (gid.x*pcs.nb01 + bc_ab*pcs.nb02) + pcs.inAOff; // Based from inA
    const uint y = (gid.y*pcs.nb11 + bc_ba*pcs.nb12) / 4 + pcs.inBOff; // based from inB
    float sum = 0.0f;
    for (uint i = 0; i < pcs.ne00; i++) {
        const uint block_number = i / ELS_PER_BLOCK;
        const uint block_offset = block_number * BLOCK_SIZE;
        const float d = u8BufToFloat16(inA, x + block_offset);
        const uint position_in_block = i % ELS_PER_BLOCK;
        const int q0 = int8_t(inA[x+block_offset+QS_OFFSET+position_in_block]);
        const float dq0 = d * q0;
        sum += (dq0 * float(inB[y+i]));
    }

    out_[gid.z*(pcs.nb2/4) + gid.y*(pcs.nb1/4) + gid.x + pcs.outOff] = sum;
}