Mirror of https://github.com/ggerganov/llama.cpp.git
Synced 2024-11-15 15:29:53 +00:00

Commit f3f65429c4:
* scripts : update sync [no ci]
* files : relocate [no ci]
* ci : disable kompute build [no ci]
* cmake : fixes [no ci]
* server : fix mingw build ggml-ci
* cmake : minor [no ci]
* cmake : link math library [no ci]
* cmake : build normal ggml library (not object library) [no ci]
* cmake : fix kompute build ggml-ci
* make,cmake : fix LLAMA_CUDA + replace GGML_CDEF_PRIVATE ggml-ci
* move public backend headers to the public include directory (#8122)
  * move public backend headers to the public include directory
  * nix test
  * spm : fix metal header
  ---------
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* scripts : fix sync paths [no ci]
* scripts : sync ggml-blas.h [no ci]
---------
Co-authored-by: slaren <slarengh@gmail.com>
#version 450

#include "common.comp"

layout(local_size_x = 1) in;

// Source matrix data, stored as fp16.
layout (binding = 0) readonly  buffer tensorInA { float16_t inA[]; };
// Row indices to gather: one int32 index per output row.
layout (binding = 1) readonly  buffer tensorInB { int inB[]; };
// Destination: gathered rows, widened to fp32.
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };

layout (push_constant) uniform parameter {
    uint inAOff; // element offset into inA
    uint inBOff; // element offset into inB
    uint outOff; // element offset into out_
    int  ne00;   // number of elements per row
    int  nb01;   // row stride of inA, in bytes
    int  nb1;    // row stride of out_, in bytes
} pcs;

// Copy k fp16 elements starting at inA[x] into out_[y], converting to fp32.
void dequantize_row_f16(uint x /*Based from inA unaligned*/, uint y /*Based from out_*/, int k) {
    for (int j = 0; j < k; j++) {
        out_[y + j] = inA[x + j];
    }
}

void main() {
    const uint i = gl_WorkGroupID.x;    // one workgroup per output row
    const int  r = inB[i + pcs.inBOff]; // index of the source row to gather

    // Byte strides are converted to element indices: /2 for the fp16 input, /4 for the fp32 output.
    dequantize_row_f16(r*pcs.nb01/2/*bytes for float16*/ + pcs.inAOff, i*pcs.nb1/4 + pcs.outOff, pcs.ne00);
}
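This appears to be the kompute backend's row-gather (get-rows) kernel for fp16 tensors. The sketch below is a minimal CPU reference for the same gather, added here for illustration only; it is not code from the repository, the function and parameter names are hypothetical, and it uses fp32 for both source and destination to sidestep fp16 conversion (the shader additionally widens fp16 to fp32). Strides in the sketch are in elements, whereas the shader's nb01/nb1 push constants are in bytes, which is why the shader divides by 2 and 4.

#include <stddef.h>
#include <stdint.h>

/* Hypothetical reference loop: for each output row i, copy row indices[i]
 * of src into dst. Names and types are illustrative, not from the repo. */
static void get_rows_ref(const float *src, const int32_t *indices, float *dst,
                         int n_rows, int ne00, size_t src_stride, size_t dst_stride) {
    for (int i = 0; i < n_rows; i++) {
        const int32_t r = indices[i];        /* row of src to gather        */
        for (int j = 0; j < ne00; j++) {     /* copy ne00 elements per row  */
            dst[(size_t)i * dst_stride + j] = src[(size_t)r * src_stride + j];
        }
    }
}

In the shader, the outer loop over i is replaced by one workgroup per output row (gl_WorkGroupID.x), and the inner copy is the dequantize_row_f16 loop.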