#version 450

#include "common.comp"

#define BLOCKS_IN_QUANT QK4_1
#define SIZE_OF_BLOCK sizeof_block_q4_1
#define N_ROWS 4
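
// A q4_1 block (ggml's block_q4_1) is assumed here to be an f16 scale d, an
// f16 min m, then QK4_1 == 32 weights packed two 4-bit quants per byte, so
// SIZE_OF_BLOCK works out to 2 + 2 + 16 = 20 bytes.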

layout(local_size_x_id = 0) in;
layout(local_size_y = 1) in;
layout(local_size_z = 1) in;

// inA: the q4_1 weight matrix, viewed as raw bytes; inB: the f32 vector y;
// out_: the f32 output.
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };

layout (push_constant) uniform parameter {
    uint inAOff; // byte offset into inA
    uint inBOff;
    uint outOff;
    int ne00;    // elements per row of A (ggml src0->ne[0])
    int ne10;    // elements per row of B
    int ne0;     // output extents
    int ne1;
    int ne01;    // number of rows of A
    int gqa;     // grouped-query-attention broadcast ratio
} pcs;
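
// How one block contributes: a q4_1 weight dequantizes to d * q + m, so
//   sum_j y_j * (d * q_j + m) = d * sum_j (y_j * q_j) + m * sum_j y_j.
// block_q_n_dot_y below accumulates sum(y * q) in acc and sum(y) in sumy.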

// The q4_1 version of this function
float block_q_n_dot_y(uint block_index, uint yb, uint il) {
    vec2 acc = vec2(0.0, 0.0);
    const uint index = (block_index) * SIZE_OF_BLOCK + pcs.inAOff;
    float d = float(u8BufToFloat16(inA, index));   // f16 scale
    float m = float(u8BufToFloat16(inA, index+2)); // f16 min

    float sumy = 0.0f;
    for (int i = 0; i < BLOCKS_IN_QUANT/4; i+=2) {
        // Two packed bytes = four quants: low nibbles belong to the first
        // half of the block, high nibbles to the second half.
        const uint16_t b = u8BufToU16(inA, index + 4 + il + i);

        const float yl0 = inB[yb + i];
        const float yl1 = inB[yb + i + 1];
        const float yl8 = inB[yb + i + BLOCKS_IN_QUANT/2];
        const float yl9 = inB[yb + i + BLOCKS_IN_QUANT/2 + 1];

        sumy += yl0 + yl1 + yl8 + yl9;

        // The nibbles are masked but not shifted down; the scaling is folded
        // into y instead, e.g. yl1 / 256.f * (b & 0x0F00) == yl1 * ((b >> 8) & 0xF).
        acc[0] += yl0 * (b & 0x000F) + yl1 / 256.f * (b & 0x0F00);
        acc[1] += yl8 / 16.f * (b & 0x00F0) + yl9 / 4096.f * (b & 0xF000);
    }
    return d * (acc[0] + acc[1]) + sumy * m;
}
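
// op_mul_mv_q_n.comp is expected to supply main(), the shared mul_mv driver
// that calls block_q_n_dot_y for each block of the rows handled here.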
#include "op_mul_mv_q_n.comp"