mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-11-11 21:39:52 +00:00
7c7836d9d4
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory * Improve debug log code * Add memory debug output option * Fix flake8 * Fix unnecessary high llama-3 VRAM use
27 lines
702 B
Plaintext
#version 450

#include "types.comp"
#include "generic_binary_head.comp"

// Row-gather shader (get_rows-style): for each destination element, look up a
// source row index in data_b and copy one element of that row from data_a to
// data_d. Strides (nb*) and extents (ne*) come from the push-constant block `p`
// declared in generic_binary_head.comp.
void main() {
    // Decompose the global invocation id:
    //   x -> element within a row, y -> row-index position,
    //   z -> packed (i11, i12) pair, split by ne12.
    const uint i00 = gl_GlobalInvocationID.x;
    const uint i10 = gl_GlobalInvocationID.y;
    const uint i11 = (gl_GlobalInvocationID.z)/p.ne12;
    const uint i12 = (gl_GlobalInvocationID.z)%p.ne12;

    // Guard against the dispatch being rounded up past the row length.
    if (i00 >= p.ne00) {
        return;
    }

    // Fetch the source row index for this output position from data_b.
    const uint i01 = data_b[i10*p.nb10 + i11*p.nb11 + i12*p.nb12];

    // Element offsets into the source (a) and destination (d) buffers.
    const uint a_offset = i01*p.nb01 + i11*p.nb02 + i12*p.nb03;
    const uint d_offset = i10*p.nb21 + i11*p.nb22 + i12*p.nb23;

#ifndef OPTIMIZATION_ERROR_WORKAROUND
    // Normal path: convert through the destination element type.
    data_d[d_offset + i00] = D_TYPE(data_a[a_offset + i00]);
#else
    // Workaround path: store without the explicit D_TYPE conversion
    // (sidesteps a driver/compiler optimization bug — see build flags).
    data_d[d_offset + i00] = data_a[a_offset + i00];
#endif
}
|