// llama.cpp/kompute/op_mul_mat_q4_1.comp

/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models License (SOM),
 * version 1.0, as detailed in the LICENSE_SOM.txt file. A copy of this license should accompany
 * this software. Except as expressly granted in the SOM license, all rights are reserved by Nomic, Inc.
 */
#version 450
#extension GL_EXT_shader_16bit_storage: require
#extension GL_EXT_shader_8bit_storage: require
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#extension GL_EXT_shader_explicit_arithmetic_types_int8: require
#extension GL_EXT_shader_explicit_arithmetic_types_int16: require
#extension GL_EXT_control_flow_attributes: enable
#define QK4_0 32
#define QR4_0 2
#define QK4_1 32
#define GELU_COEF_A 0.044715
#define SQRT_2_OVER_PI 0.79788456080286535587989211986876
#ifndef QK_K
#define QK_K 256
#endif
#if QK_K == 256
#define K_SCALE_SIZE 12
#else
#define K_SCALE_SIZE 4
#endif
#define BM 128
#define BN 128
#define BK 8
#define TM 8
#define TN 8
#define u8BufToU16(buf, idx) (((uint16_t(buf[idx + 1]) << 8)) | buf[idx])
#define u8BufToFloat16(buf, idx) uint16BitsToHalf u8BufToU16(buf, idx)
#define u8BufToU32(buf, idx) (((uint32_t u8BufToU16(buf, idx + 2) << 8 | buf[idx + 1]) << 8) | buf[idx])
#define u8BufToFloat(buf, idx) uintBitsToFloat u8BufToU32(buf, idx)
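// The u8BufTo* macros reassemble little-endian 16- and 32-bit values from
// consecutive bytes of a uint8_t buffer; the Float16/Float variants bit-cast
// the result via uint16BitsToHalf / uintBitsToFloat.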
#define sizeof_block_q4_0 0x12
#define sizeof_block_q4_1 0x14
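// Block sizes in bytes: block_q4_0 is an fp16 scale plus 16 packed nibble
// bytes (2 + 16 = 18 = 0x12); block_q4_1 adds an fp16 minimum
// (2 + 2 + 16 = 20 = 0x14).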
struct block_q4_0 {
    float16_t d;
    uint8_t qs[QK4_0 / 2];
};
struct block_q4_1 {
    float16_t d;
    float16_t m;
    uint8_t qs[QK4_1 / 2];
};
struct block_q2_K {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    float16_t d;             // super-block scale for quantized scales
    float16_t dmin;          // super-block scale for quantized mins
};
// 84 bytes / block
struct block_q3_K {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
#if QK_K == 64
    uint8_t scales[2];
#else
    uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
    float16_t d; // super-block scale
};
#if QK_K == 64
struct block_q4_K {
    float16_t d[2];     // super-block scales/mins
    uint8_t scales[2];
    uint8_t qs[QK_K/2]; // 4-bit quants
};
#else
struct block_q4_K {
    float16_t d;                  // super-block scale for quantized scales
    float16_t dmin;               // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];           // 4-bit quants
};
#endif
#if QK_K == 64
struct block_q5_K {
    float16_t d;            // super-block scales/mins
    int8_t scales[QK_K/16]; // 8-bit block scales
    uint8_t qh[QK_K/8];     // quants, high bit
    uint8_t qs[QK_K/2];     // quants, low 4 bits
};
#else
struct block_q5_K {
    float16_t d;               // super-block scale for quantized scales
    float16_t dmin;            // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];        // quants, high bit
    uint8_t qs[QK_K/2];        // quants, low 4 bits
};
// 176 bytes / block
#endif
struct block_q6_K {
    uint8_t ql[QK_K/2];     // quants, lower 4 bits
    uint8_t qh[QK_K/4];     // quants, upper 2 bits
    int8_t scales[QK_K/16]; // scales, quantized with 8 bits
    float16_t d;            // super-block scale
};
// 210 bytes / block
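// The block_q2_K..block_q6_K structs above are shared quantization
// definitions; none of them are referenced by this q4_1 kernel, which runs
// one 8x8 (64-thread) workgroup per output element.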
layout(local_size_x = 8, local_size_y = 8) in;
layout (binding = 0) readonly buffer tensorInA { uint8_t inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
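// Push constants: inAOff is a byte offset into inA, while inBOff and outOff
// are element offsets into inB and out_. ne00 is the row length of inA in
// elements (assumed to be a multiple of QK4_1), ne10 the row length of inB,
// and ne0 the row stride of the output.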
layout (push_constant) uniform parameter {
    uint inAOff;
    uint inBOff;
    uint outOff;
    int ne00;
    int ne10;
    int ne0;
} pcs;
shared float sum[gl_WorkGroupSize.x*gl_WorkGroupSize.y];
#define UNALIGNED_INPUT inA
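// Rebuild a block_q4_1 field-by-field from the raw byte buffer. `index` is a
// byte offset, so a block may start at any position regardless of std430
// struct alignment: d occupies bytes [index, index+1], m bytes
// [index+2, index+3], followed by the 16 packed nibble bytes.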
block_q4_1 get_unaligned_block_q4_1(uint index) {
    block_q4_1 fres;
    fres.d = u8BufToFloat16(UNALIGNED_INPUT, index);
    fres.m = u8BufToFloat16(UNALIGNED_INPUT, index+2);
    [[unroll]] for (uint it = 0; it != QK4_1 / 2; it++) {
        fres.qs[it] = UNALIGNED_INPUT[index+4+it];
    }
    return fres;
}
void main() {
    const uint nb = uint(pcs.ne00/QK4_1);

    const uint r0 = gl_WorkGroupID.x;
    const uint r1 = gl_WorkGroupID.y;

    const uint x = r0*nb; // Based from inA without base offset
    const uint y = r1*uint(pcs.ne10) + pcs.inBOff; // Based from inB

    const uint nth = gl_WorkGroupSize.x*gl_WorkGroupSize.y;
    const uint ith = gl_WorkGroupSize.y*gl_LocalInvocationID.x + gl_LocalInvocationID.y;

    const uint ix = gl_LocalInvocationID.y/4; // 0 or 1
    const uint iy = gl_LocalInvocationID.y - 4*ix; // 0...3

    const uint first = 4 * iy;

    float sumf = 0.0;
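    // Each workgroup computes one output element: the dot product of row r0
    // of inA with row r1 of inB. Threads stride over the nb blocks in pairs
    // (ix picks the even/odd block of a pair), and within a block iy selects
    // which 4 low nibbles (plus the matching 4 high nibbles) this thread
    // multiplies and accumulates.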
    for (uint i = 2*gl_LocalInvocationID.x + ix; i < nb; i += 2*gl_WorkGroupSize.x) {
        //TODO: Removing the use of pointers has been quite hairy here. If something goes wrong here, this is most likely it:
        const block_q4_1 block = get_unaligned_block_q4_1((x+i)*sizeof_block_q4_1+pcs.inAOff);

        const float d = float(block.d);
        const float m = float(block.m);

        const uint xl = first; // Based from bl->qs
        const uint yl = y + i * QK4_1 + first; // Based from inB

        vec2 acc = vec2(0.0, 0.0);
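        // q4_1 dequantization is x[j] = d * q[j] + m for each 4-bit quant
        // q[j]; e.g. q = 7 with d = 0.1 and m = -1.0 decodes to -0.3.
        // acc.x gathers y * x over the low nibbles (block elements 0..15),
        // acc.y over the high nibbles (elements 16..31), so d and m are
        // already applied inside acc.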
        for (int j = 0; j < 4; ++j) {
            acc.x += inB[yl+j] * (d * (block.qs[xl+j] & 0xF) + m);
            acc.y += inB[yl+j+16] * (d * (block.qs[xl+j] >> 4) + m);
        }

        sumf += acc.x + acc.y;
    }
    sum[ith] = sumf;

    //
    // Accumulate the sum from all threads in the threadgroup
    //
    barrier();
    memoryBarrierShared();
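    // Tree reduction over the 64 partial sums: every 4th lane folds its group
    // of 4, then every 16th lane folds four of those group sums, and lane 0
    // adds the remaining stride-16 entries before writing the result.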
    if (ith%4 == 0) {
        sum[ith] += sum[ith+1] + sum[ith+2] + sum[ith+3];
    }
    barrier();
    memoryBarrierShared();
    if (ith%16 == 0) {
        sum[ith] += sum[ith+4] + sum[ith+8] + sum[ith+12];
    }
    barrier();
    memoryBarrierShared();
    if (ith == 0) {
        for (uint i = 16; i < nth; i += 16) sum[0] += sum[i];
        out_[r1*uint(pcs.ne0) + r0 + pcs.outOff] = sum[0];
    }
}