/**
 * Copyright (c) 2023 Nomic, Inc. All rights reserved.
 *
 * This software is licensed under the terms of the Software for Open Models License (SOM),
 * version 1.0, as detailed in the LICENSE_SOM.txt file. A copy of this license should accompany
 * this software. Except as expressly granted in the SOM license, all rights are reserved by Nomic, Inc.
 */

#version 450

#extension GL_EXT_shader_16bit_storage: require
#extension GL_EXT_shader_8bit_storage: require
#extension GL_EXT_shader_explicit_arithmetic_types_float16: require
#extension GL_EXT_shader_explicit_arithmetic_types_int8: require
#extension GL_EXT_shader_explicit_arithmetic_types_int16: require
#extension GL_EXT_control_flow_attributes: enable

#define QK4_0 32
#define QR4_0 2
#define QK4_1 32

#define GELU_COEF_A 0.044715
#define SQRT_2_OVER_PI 0.79788456080286535587989211986876

#ifndef QK_K
#define QK_K 256
#endif

#if QK_K == 256
#define K_SCALE_SIZE 12
#else
#define K_SCALE_SIZE 4
#endif

#define BM 128
#define BN 128
#define BK 8
#define TM 8
#define TN 8

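// The u8BufTo* macros below assemble little-endian 16/32-bit values from a uint8_t
// buffer and bit-cast them to float16/float, so packed quantized data can be read
// out of byte-sized storage.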
#define u8BufToU16(buf, idx) (((uint16_t(buf[idx + 1]) << 8)) | buf[idx])
#define u8BufToFloat16(buf, idx) uint16BitsToHalf u8BufToU16(buf, idx)
#define u8BufToU32(buf, idx) (((uint32_t u8BufToU16(buf, idx + 2) << 8 | buf[idx + 1]) << 8) | buf[idx])
#define u8BufToFloat(buf, idx) uintBitsToFloat u8BufToU32(buf, idx)

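// Quantized block layouts. These mirror ggml's block_* structs: d is the per-block
// (half-precision) scale, m/dmin the minimum, and qs/qh hold the packed quants.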
#define sizeof_block_q4_0 0x12
#define sizeof_block_q4_1 0x14
struct block_q4_0 {
    float16_t d;
    uint8_t qs[QK4_0 / 2];
};
struct block_q4_1 {
    float16_t d;
    float16_t m;
    uint8_t qs[QK4_1 / 2];
};

#ifndef QK_K
#define QK_K 256
#endif

#if QK_K == 256
#define K_SCALE_SIZE 12
#else
#define K_SCALE_SIZE 4
#endif

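// K-quant super-blocks: each block spans QK_K (normally 256) weights, with
// per-sub-block scales and mins packed as noted in the field comments.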
struct block_q2_K {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    float16_t d;             // super-block scale for quantized scales
    float16_t dmin;          // super-block scale for quantized mins
};
// 84 bytes / block

struct block_q3_K {
    uint8_t hmask[QK_K/8];   // quants - high bit
    uint8_t qs[QK_K/4];      // quants - low 2 bits
#if QK_K == 64
    uint8_t scales[2];
#else
    uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
    float16_t d;             // super-block scale
};

#if QK_K == 64
typedef struct {
    float16_t d[2];          // super-block scales/mins
    uint8_t scales[2];
    uint8_t qs[QK_K/2];      // 4-bit quants
} block_q4_K;
#else
struct block_q4_K {
    float16_t d;             // super-block scale for quantized scales
    float16_t dmin;          // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];      // 4-bit quants
};
#endif

#if QK_K == 64
struct block_q5_K {
    float16_t d;             // super-block scales/mins
    int8_t scales[QK_K/16];  // 8-bit block scales
    uint8_t qh[QK_K/8];      // quants, high bit
    uint8_t qs[QK_K/2];      // quants, low 4 bits
};
#else
struct block_q5_K {
    float16_t d;             // super-block scale for quantized scales
    float16_t dmin;          // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];      // quants, high bit
    uint8_t qs[QK_K/2];      // quants, low 4 bits
};
// 176 bytes / block
#endif

struct block_q6_K {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t scales[QK_K/16];  // scales, quantized with 8 bits
    float16_t d;             // super-block scale
};
// 210 bytes / block

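// NORM kernel: each workgroup normalizes one row of ne00 floats. The row mean and
// variance are computed with shared-memory parallel reductions, then the output is
// written as (x - mean) / sqrt(variance + eps).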
#define nth 256

layout(local_size_x = nth) in;

layout(binding = 0) buffer restrict readonly tensorIn { float in_[]; };
layout(binding = 1) buffer restrict tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;   // element offset into in_
    uint outOff;  // element offset into out_
    uint ne00;    // row length, in elements
    uint nb01;    // row stride of in_, in bytes
    float eps;    // small constant added to the variance
} pcs;

shared float sum[nth];

void main() {
    const uint x = (gl_WorkGroupID.x*pcs.nb01/4) + pcs.inOff; // Based from in_; nb01 is in bytes, so divide by 4 to index floats

    // MEAN
    // parallel sum
    sum[gl_LocalInvocationID.x] = 0.0;
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += nth) {
        sum[gl_LocalInvocationID.x] += in_[x+i00];
    }

    // reduce
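    // Tree reduction in shared memory: the number of active threads halves each step,
    // with barriers so every partial sum is visible before it is consumed.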
    barrier();
    memoryBarrierShared();
    [[unroll]] for (uint i = nth/2; i > 0; i /= 2) {
        if (gl_LocalInvocationID.x < i) {
            sum[gl_LocalInvocationID.x] += sum[gl_LocalInvocationID.x + i];
        }
        barrier();
        memoryBarrierShared();
    }

    // broadcast
    if (gl_LocalInvocationID.x == 0) {
        sum[0] /= float(pcs.ne00);
    }
    barrier();
    memoryBarrierShared();
    const float mean = sum[0];

    // recenter
    const uint y = (gl_WorkGroupID.x*pcs.ne00) + pcs.outOff; // Based from out_
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += nth) {
        out_[y+i00] = in_[x+i00] - mean;
    }

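    // out_ now holds the recentered values (x - mean), so summing their squares and
    // dividing by ne00 below yields the variance.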
    // VARIANCE
    // parallel sum
    sum[gl_LocalInvocationID.x] = 0.0;
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += nth) {
        sum[gl_LocalInvocationID.x] += out_[y+i00] * out_[y+i00];
    }

    // reduce
    barrier();
    memoryBarrierShared();
    [[unroll]] for (uint i = nth/2; i > 0; i /= 2) {
        if (gl_LocalInvocationID.x < i) {
            sum[gl_LocalInvocationID.x] += sum[gl_LocalInvocationID.x + i];
        }
        barrier();
        memoryBarrierShared();
    }

    // broadcast
    if (gl_LocalInvocationID.x == 0) {
        sum[0] /= float(pcs.ne00);
    }
    barrier();
    memoryBarrierShared();
    const float variance = sum[0];

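    // Normalize in place: out = (in - mean) / sqrt(variance + eps)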
    const float scale = 1.0f/sqrt(variance + pcs.eps);
    for (uint i00 = gl_LocalInvocationID.x; i00 < pcs.ne00; i00 += nth) {
        out_[y+i00] *= scale;
    }
}