Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 11:24:35 +00:00), commit fbf1ddec69.
Signed-off-by: Jared Van Bortel <jared@nomic.ai> Co-authored-by: niansa <anton-sa@web.de> Co-authored-by: Adam Treat <treat.adam@gmail.com> Co-authored-by: Aaron Miller <apage43@ninjawhale.com> Co-authored-by: ToKiNoBug <tokinobug@163.com> Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> Co-authored-by: slaren <slarengh@gmail.com>
20 lines · 432 B · GLSL compute shader (labeled "Plaintext" by the file viewer)
#version 450

#include "common.comp"

// One invocation per workgroup; each workgroup handles one element.
layout(local_size_x = 1) in;

// Source and destination tensors, flat float arrays in storage buffers.
layout(binding = 0) buffer restrict readonly  tensorIn  { float in_[]; };
layout(binding = 1) buffer restrict writeonly tensorOut { float out_[]; };

layout(push_constant) uniform PushConstants {
    uint inOff;   // element offset into in_ where the source tensor starts
    uint outOff;  // element offset into out_ where the destination starts
    float scale;  // scalar multiplier applied to every element
} pcs;

// Element-wise scale: out_[outOff + i] = in_[inOff + i] * scale,
// where i is the workgroup index along x (one element per workgroup).
void main() {
    const uint i = gl_WorkGroupID.x;

    out_[i + pcs.outOff] = in_[i + pcs.inOff] * pcs.scale;
}