Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 11:24:35 +00:00).
7c7836d9d4
* Refactor shaders, extract GLSL code from ggml_vk_generate_shaders.py into vulkan-shaders directory
* Improve debug log code
* Add memory debug output option
* Fix flake8
* Fix unnecessary high llama-3 VRAM use
14 lines · 249 B · Plaintext
// Shared shader header (llama.cpp Vulkan backend, vulkan-shaders directory).
// Declares the extensions and the push-constant parameter block used by the
// generic element-wise / copy compute shaders that include this file.

#extension GL_EXT_control_flow_attributes : require
#extension GL_EXT_shader_16bit_storage : require

// Per-dispatch parameters supplied by the host via push constants.
// NOTE(review): field semantics below are inferred from the names — confirm
// against the host-side dispatch code in the ggml Vulkan backend.
layout (push_constant) uniform parameter
{
    uint M;         // presumably row count of the operand — verify on host side
    uint K;         // presumably column count of the operand — verify on host side
    uint stride_a;  // element stride for input buffer A
    uint stride_b;  // element stride for buffer B
    uint nel;       // total number of elements to process
} p;

#include "types.comp"