#version 450

#include "common.comp"

layout(local_size_x = 1024) in;

layout(binding = 0) buffer restrict readonly  tensorInA { float inA[]; };
layout(binding = 1) buffer restrict readonly  tensorInB { float inB[]; };
layout(binding = 2) buffer restrict writeonly tensorOut { float out_[]; };
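
// ggml tensor layout conventions: ne* are element counts per dimension and
// nb* are strides in bytes. Fields *00..*03 describe src0 (inA), *10..*13
// describe src1 (inB), and ne0/nb0..nb3 describe the destination tensor.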
layout(push_constant) uniform PushConstants {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int nb00;
    int nb01;
    int nb02;
    int nb03;
    int ne10;
    int ne11;
    int ne12;
    int ne13;
    int nb10;
    int nb11;
    int nb12;
    int nb13;
    int ne0;
    int nb0;
    int nb1;
    int nb2;
    int nb3;
    //int offs; // TODO: needed for GGML_OP_ACC, see metal code
} pcs;

// general-purpose kernel for addition of two tensors
// pros: works for non-contiguous tensors, supports broadcast across dims 1, 2 and 3
// cons: not very efficient
void main() {
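    // Each workgroup handles one row of the output: gl_WorkGroupID maps
    // directly to (i01, i02, i03), so the host is presumably dispatching an
    // (ne01, ne02, ne03) grid of workgroups for this kernel.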
    const uint i03 = gl_WorkGroupID.z;
    const uint i02 = gl_WorkGroupID.y;
    const uint i01 = gl_WorkGroupID.x;
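
    // Broadcast src1 across dims 1..3: taking each index modulo src1's
    // extent repeats src1 along any dimension where it is smaller than src0
    // (e.g. ne11 == 1 makes every row of src0 reuse src1's single row).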
    const uint i13 = i03 % pcs.ne13;
    const uint i12 = i02 % pcs.ne12;
    const uint i11 = i01 % pcs.ne11;

    int offs = 0; // TMP (see above)
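
    // The nb* strides are given in bytes; dividing by 4 converts a byte
    // offset into an index into the f32 arrays above. For a contiguous f32
    // tensor, nb01 == 4*ne00, so i01*nb01/4 == i01*ne00: the float index of
    // the first element of row i01.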
    uint src0_off = uint((i03*pcs.nb03 + i02*pcs.nb02 + i01*pcs.nb01 + offs) / 4);
    uint src1_off = uint((i13*pcs.nb13 + i12*pcs.nb12 + i11*pcs.nb11       ) / 4);
    uint dst_off  = uint((i03*pcs.nb3  + i02*pcs.nb2  + i01*pcs.nb1  + offs) / 4);
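
    // The workgroup's invocations stride across dim 0 of the row, and
    // i0 % ne10 broadcasts src1 along dim 0 when it is narrower than dst.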
    for (uint i0 = gl_LocalInvocationID.x; i0 < pcs.ne0; i0 += gl_WorkGroupSize.x) {
        const uint i10 = i0 % pcs.ne10;
        out_[pcs.outOff + dst_off + i0] = inA[pcs.inAOff + src0_off + i0] + inB[pcs.inBOff + src1_off + i10];
    }
}