Compare commits

...

4 Commits

Author SHA1 Message Date
Zhenwei Jin
71345218ad
Merge 9373e2ba58 into dea5e86051 2024-10-31 20:02:23 +08:00
Diego Devesa
dea5e86051
ggml : check tensor name lengths in gguf files (#10100) 2024-10-31 11:40:59 +01:00
Sergio López
1329c0a75e
kompute: add mul_mat_q4_k shader (#10097)
This is a more or less direct translation from the Metal implementation
to GLSL.

Signed-off-by: Sergio Lopez <slp@redhat.com>
2024-10-31 11:09:52 +02:00
zhenweijin
9373e2ba58
cann: merge get row operations of float type 2024-08-21 10:25:22 +08:00
9 changed files with 278 additions and 212 deletions

View File

@@ -800,6 +800,7 @@ if (GGML_KOMPUTE)
         kompute-shaders/op_mul_mat_q8_0.comp
         kompute-shaders/op_mul_mat_q4_0.comp
         kompute-shaders/op_mul_mat_q4_1.comp
+        kompute-shaders/op_mul_mat_q4_k.comp
         kompute-shaders/op_mul_mat_q6_k.comp
         kompute-shaders/op_getrows_f32.comp
         kompute-shaders/op_getrows_f16.comp
@@ -833,6 +834,7 @@ if (GGML_KOMPUTE)
         shaderop_mul_mat_q8_0.h
         shaderop_mul_mat_q4_0.h
         shaderop_mul_mat_q4_1.h
+        shaderop_mul_mat_q4_k.h
         shaderop_mul_mat_q6_k.h
         shaderop_getrows_f32.h
         shaderop_getrows_f16.h

View File

@@ -3,8 +3,7 @@ if (NOT SOC_TYPE)
 endif()

 file(GLOB SRC_FILES
-    get_row_f32.cpp
-    get_row_f16.cpp
+    get_row_float.cpp
     get_row_q4_0.cpp
     get_row_q8_0.cpp
     quantize_f32_q8_0.cpp

View File

@@ -1,180 +0,0 @@
#include "kernel_operator.h"
// optimize me. Use template to avoid copy code.
using namespace AscendC;
#define BUFFER_NUM 2
class GET_ROW_F32 {
public:
__aicore__ inline GET_ROW_F32() {}
__aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
int64_t *input_ne_ub, size_t *input_nb_ub,
int64_t *indices_ne_ub, size_t *indices_nb_ub,
int64_t *output_ne_ub, size_t *output_nb_ub) {
int64_t op_block_num = GetBlockNum();
int64_t op_block_idx = GetBlockIdx();
for (int i = 0; i < 4; i++) {
input_ne[i] = input_ne_ub[i];
input_stride[i] = input_nb_ub[i] / input_nb_ub[0];
indices_ne[i] = indices_ne_ub[i];
indices_stride[i] = indices_nb_ub[i] / indices_nb_ub[0];
output_ne[i] = output_ne_ub[i];
output_stride[i] = output_nb_ub[i] / output_nb_ub[0];
}
// Indices has two dims. n_elements = all rows should get.
// dr, all rows should this thread get.
uint64_t n_elements =
indices_ne[0] * indices_ne[1] * indices_ne[2] * indices_ne[3];
dr = n_elements / op_block_num;
uint64_t tails = n_elements % op_block_num;
if (op_block_idx < tails) {
dr += 1;
ir = dr * op_block_idx;
} else {
ir = dr * op_block_idx + tails;
}
input_gm.SetGlobalBuffer((__gm__ float *)input);
indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
output_gm.SetGlobalBuffer((__gm__ float *)output);
uint64_t local_buffer_size = ((input_ne[0] * sizeof(float) + 31) & ~31);
local_buffer_elems = local_buffer_size / sizeof(float);
// TODO, consider long row that can't put in UB.
// All data should asign to 32. It's ok because all data is align to 32.
pipe.InitBuffer(input_queue, BUFFER_NUM, local_buffer_size);
pipe.InitBuffer(output_queue, BUFFER_NUM, local_buffer_size);
}
__aicore__ inline void copy_in(uint32_t offset, size_t len) {
LocalTensor<float> input_local = input_queue.AllocTensor<float>();
size_t tail = len % 32;
len = len & ~31;
DataCopy(input_local, input_gm[offset], len);
if(tail != 0) {
DataCopyExtParams dataCopyParams;
dataCopyParams.blockCount = 1;
dataCopyParams.blockLen = tail * sizeof(float);
DataCopyPadExtParams<float> padParams;
DataCopyPad(input_local[len], input_gm[offset + len],
dataCopyParams, padParams);
}
input_queue.EnQue(input_local);
}
__aicore__ inline void copy_out(uint32_t offset, size_t len) {
LocalTensor<float> output_local = output_queue.DeQue<float>();
size_t tail = len % 32;
len = len & ~31;
DataCopy(output_gm[offset], output_local, len);
if(tail != 0) {
DataCopyExtParams dataCopyParams;
dataCopyParams.blockCount = 1;
dataCopyParams.blockLen = tail * sizeof(float);
DataCopyPad(output_gm[offset + len], output_local[len],
dataCopyParams);
}
output_queue.FreeTensor(output_local);
}
__aicore__ inline void calculate_row(int64_t idx) {
const int64_t indices_ne2_idx = idx / (indices_ne[0] * indices_ne[1]);
const int64_t indices_ne1_idx =
(idx - indices_ne2_idx * indices_ne[0] * indices_ne[1]) /
indices_ne[0];
const int64_t indices_ne0_idx =
(idx - indices_ne2_idx * indices_ne[0] * indices_ne[1] -
indices_ne1_idx * indices_ne[0]);
const int64_t indices_offset = indices_ne0_idx * indices_stride[0] +
indices_ne1_idx * indices_stride[1] +
indices_ne2_idx * indices_stride[2];
const int32_t selected_row_idx = indices_gm.GetValue(indices_offset);
const int64_t input_offset = selected_row_idx * input_stride[1] +
indices_ne1_idx * input_stride[2] +
indices_ne2_idx * input_stride[3];
const int64_t output_offset = indices_ne0_idx * output_stride[1] +
indices_ne1_idx * output_stride[2] +
indices_ne2_idx * output_stride[3];
copy_in(input_offset, input_ne[0]);
LocalTensor<float> input_local = input_queue.DeQue<float>();
LocalTensor<float> output_local = output_queue.AllocTensor<float>();
DataCopy(output_local, input_local, local_buffer_elems);
output_queue.EnQue(output_local);
copy_out(output_offset, input_ne[0]);
input_queue.FreeTensor(input_local);
}
__aicore__ inline void calculate() {
for (int64_t i = ir; i < ir + dr; i++) {
calculate_row(i);
}
}
private:
int64_t input_ne[4];
size_t input_stride[4];
int64_t indices_ne[4];
size_t indices_stride[4];
int64_t output_ne[4];
size_t output_stride[4];
size_t local_buffer_elems;
int64_t ir;
int64_t dr;
TPipe pipe;
GlobalTensor<float> input_gm;
GlobalTensor<int32_t> indices_gm;
GlobalTensor<float> output_gm;
TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
};
template <typename T>
__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
auto gm_ptr = (__gm__ uint8_t *)gm;
auto ub_ptr = (uint8_t *)(ub);
for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
*ub_ptr = *gm_ptr;
}
}
extern "C" __global__ __aicore__ void ascendc_get_row_f32(
GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
int64_t input_ne_ub[4];
size_t input_nb_ub[4];
int64_t indices_ne_ub[4];
size_t indices_nb_ub[4];
int64_t output_ne_ub[4];
size_t output_nb_ub[4];
copy_to_ub(input_ne_gm, input_ne_ub, 32);
copy_to_ub(input_nb_gm, input_nb_ub, 32);
copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
copy_to_ub(output_ne_gm, output_ne_ub, 32);
copy_to_ub(output_nb_gm, output_nb_ub, 32);
GET_ROW_F32 op;
op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
op.calculate();
}
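A side note on the deleted kernel above (the same logic survives in the templated file below): init() splits the total number of rows to gather evenly across the AI cores, handing one extra row to the first n_elements % block_num cores. A minimal host-side C++ sketch of that partition, using a hypothetical split_rows() helper instead of the AscendC API:

#include <cstdint>
#include <cstdio>

// Illustrative only: the same split as GET_ROW_F32::init() above, outside the
// AscendC API. Computes the half-open row range [ir, ir + dr) owned by block `idx`.
static void split_rows(uint64_t n_elements, int64_t n_blocks, int64_t idx,
                       int64_t & ir, int64_t & dr) {
    dr = (int64_t) (n_elements / (uint64_t) n_blocks);        // base rows per block
    const uint64_t tails = n_elements % (uint64_t) n_blocks;  // blocks that take one extra row
    if ((uint64_t) idx < tails) {
        dr += 1;
        ir  = dr * idx;                    // every earlier block also took dr rows
    } else {
        ir  = dr * idx + (int64_t) tails;  // earlier blocks contributed `tails` extra rows in total
    }
}

int main() {
    for (int64_t idx = 0; idx < 3; ++idx) {
        int64_t ir, dr;
        split_rows(/*n_elements=*/10, /*n_blocks=*/3, idx, ir, dr);
        printf("block %lld: rows [%lld, %lld)\n",
               (long long) idx, (long long) ir, (long long) (ir + dr));
    }
    // block 0: rows [0, 4), block 1: rows [4, 7), block 2: rows [7, 10)
}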

View File

@@ -5,14 +5,14 @@ using namespace AscendC;

 #define BUFFER_NUM 2

-class GET_ROW_F16 {
+template<typename ROW_TYPE>
+class GET_ROW_FLOAT {
    public:
-    __aicore__ inline GET_ROW_F16() {}
+    __aicore__ inline GET_ROW_FLOAT() {}
     __aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
                                 int64_t *input_ne_ub, size_t *input_nb_ub,
                                 int64_t *indices_ne_ub, size_t *indices_nb_ub,
                                 int64_t *output_ne_ub, size_t *output_nb_ub) {
-        // TODO, use template for F16/f32
         int64_t op_block_num = GetBlockNum();
         int64_t op_block_idx = GetBlockIdx();
@@ -41,16 +41,18 @@ class GET_ROW_F16 {
            ir = dr * op_block_idx + tails;
        }

-        input_gm.SetGlobalBuffer((__gm__ half *)input);
+        input_gm.SetGlobalBuffer((__gm__ ROW_TYPE *)input);
         indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
         output_gm.SetGlobalBuffer((__gm__ float *)output);

-        uint64_t input_local_buffer_size = ((input_ne[0] * sizeof(half) + 31)
-                                             & ~31);
-        uint64_t output_local_buffer_size = ((input_ne[0] * sizeof(float) + 31)
-                                             & ~31);
-
-        local_buffer_elems = input_local_buffer_size / sizeof(half);
+        uint64_t input_local_buffer_size = ((input_ne[0] * sizeof(ROW_TYPE) + 31) & ~31);
+        uint64_t output_local_buffer_size = 0;
+        if constexpr (std::is_same<ROW_TYPE, half>::value) {
+            output_local_buffer_size = ((input_ne[0] * sizeof(float) + 31) & ~31);
+        } else if constexpr (std::is_same<ROW_TYPE, float>::value) {
+            output_local_buffer_size = input_local_buffer_size;
+        }
+
+        local_buffer_elems = input_local_buffer_size / sizeof(ROW_TYPE);

         // TODO, consider long row that can't put in UB.
         // All data should asign to 32. It's ok because all data is align to 32.
@@ -59,15 +61,15 @@ class GET_ROW_F16 {
     }

     __aicore__ inline void copy_in(uint32_t offset, size_t len) {
-        LocalTensor<half> input_local = input_queue.AllocTensor<half>();
+        LocalTensor<ROW_TYPE> input_local = input_queue.AllocTensor<ROW_TYPE>();
         size_t tail = len % 32;
         len = len & ~31;
         DataCopy(input_local, input_gm[offset], len);
         if(tail != 0) {
             DataCopyExtParams dataCopyParams;
             dataCopyParams.blockCount = 1;
-            dataCopyParams.blockLen = tail * sizeof(half);
-            DataCopyPadExtParams<half> padParams;
+            dataCopyParams.blockLen = tail * sizeof(ROW_TYPE);
+            DataCopyPadExtParams<ROW_TYPE> padParams;
             DataCopyPad(input_local[len], input_gm[offset + len],
                         dataCopyParams, padParams);
         }
@@ -112,11 +114,16 @@ class GET_ROW_F16 {
                                       indices_ne2_idx * output_stride[3];

         copy_in(input_offset, input_ne[0]);
-        LocalTensor<half> input_local = input_queue.DeQue<half>();
+        LocalTensor<ROW_TYPE> input_local = input_queue.DeQue<ROW_TYPE>();
         LocalTensor<float> output_local = output_queue.AllocTensor<float>();

-        Cast(output_local, input_local, RoundMode::CAST_NONE,
-             local_buffer_elems);
+        if constexpr (std::is_same<ROW_TYPE, half>::value) {
+            Cast(output_local, input_local, RoundMode::CAST_NONE,
+                 local_buffer_elems);
+        } else if constexpr (std::is_same<ROW_TYPE, float>::value) {
+            DataCopy(output_local, input_local, local_buffer_elems);
+        }
         output_queue.EnQue(output_local);
         copy_out(output_offset, input_ne[0]);
@@ -145,7 +152,7 @@ class GET_ROW_F16 {
     int64_t dr;

     TPipe pipe;
-    GlobalTensor<half> input_gm;
+    GlobalTensor<ROW_TYPE> input_gm;
     GlobalTensor<int32_t> indices_gm;
     GlobalTensor<float> output_gm;
     TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
@@ -179,7 +186,31 @@ extern "C" __global__ __aicore__ void ascendc_get_row_f16(
     copy_to_ub(output_ne_gm, output_ne_ub, 32);
     copy_to_ub(output_nb_gm, output_nb_ub, 32);

-    GET_ROW_F16 op;
+    GET_ROW_FLOAT<half> op;
+    op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
+            indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
+    op.calculate();
+}
+
+extern "C" __global__ __aicore__ void ascendc_get_row_f32(
+    GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
+    GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
+    GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
+    int64_t input_ne_ub[4];
+    size_t input_nb_ub[4];
+    int64_t indices_ne_ub[4];
+    size_t indices_nb_ub[4];
+    int64_t output_ne_ub[4];
+    size_t output_nb_ub[4];
+
+    copy_to_ub(input_ne_gm, input_ne_ub, 32);
+    copy_to_ub(input_nb_gm, input_nb_ub, 32);
+    copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
+    copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
+    copy_to_ub(output_ne_gm, output_ne_ub, 32);
+    copy_to_ub(output_nb_gm, output_nb_ub, 32);
+
+    GET_ROW_FLOAT<float> op;
     op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
             indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
     op.calculate();
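The templated GET_ROW_FLOAT above selects between the f16 and f32 paths at compile time with if constexpr on ROW_TYPE, so only one branch is instantiated per kernel. A small standalone C++ sketch of that dispatch pattern; std::vector and int16_t stand in for the AscendC LocalTensor and half types, which are assumptions of this sketch:

#include <cstdint>
#include <cstdio>
#include <type_traits>
#include <vector>

// Illustrative stand-in for the device-side row copy: the half path needs a
// widening conversion, the float path is a straight copy. ROW_TYPE is resolved
// at compile time, so the untaken branch is discarded, as in GET_ROW_FLOAT.
template <typename ROW_TYPE>
void copy_row(const std::vector<ROW_TYPE> & src, std::vector<float> & dst) {
    dst.resize(src.size());
    if constexpr (std::is_same<ROW_TYPE, float>::value) {
        // f32 -> f32: plain copy (DataCopy in the kernel)
        for (size_t i = 0; i < src.size(); ++i) dst[i] = src[i];
    } else {
        // narrower type -> f32: explicit conversion (Cast(..., RoundMode::CAST_NONE))
        for (size_t i = 0; i < src.size(); ++i) dst[i] = static_cast<float>(src[i]);
    }
}

int main() {
    std::vector<float>   f32_row = {1.0f, 2.0f};
    std::vector<int16_t> f16_row = {1, 2};   // int16_t stands in for `half` here
    std::vector<float>   out;
    copy_row(f32_row, out);   // copy path
    copy_row(f16_row, out);   // cast path
    printf("out[1] = %f\n", out[1]);
}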

View File

@@ -20,6 +20,7 @@
 #include "shaderop_mul_mat_q8_0.h"
 #include "shaderop_mul_mat_q4_0.h"
 #include "shaderop_mul_mat_q4_1.h"
+#include "shaderop_mul_mat_q4_k.h"
 #include "shaderop_mul_mat_q6_k.h"
 #include "shaderop_mul_mat_mat_f32.h"
 #include "shaderop_getrows_f32.h"
@@ -1067,6 +1068,40 @@ static void ggml_vk_mul_mat_q8_0(Args&&... args) {
     ggml_vk_mul_mat_impl(spirv, "q8_0", 1/*We access blocks unaligned*/, std::forward<Args>(args)...);
 }

+static void ggml_vk_mul_mat_q4_k(
+    kp::Sequence& seq,
+    const std::shared_ptr<kp::Tensor>& inA,
+    const std::shared_ptr<kp::Tensor>& inB,
+    const std::shared_ptr<kp::Tensor>& out,
+    uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
+    int32_t ne00, int32_t ne01, int32_t ne02, int32_t ne10,
+    int32_t ne11, int32_t ne12, int32_t ne13, int32_t ne0,
+    int32_t ne1, int32_t r2, int32_t r3
+) {
+    const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q4_k_comp_spv,
+        kp::shader_data::op_mul_mat_q4_k_comp_spv_len);
+
+    struct PushConstants {
+        uint32_t inAOff, inBOff, outOff;
+        int32_t ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3;
+    } pushConsts {
+        0, 0, 0,
+        ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3
+    };
+
+    std::shared_ptr<kp::Algorithm> s_algo = nullptr;
+    if (!komputeManager()->hasAlgorithm(__func__)) {
+        s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned((ne01 + 3)/4), unsigned(ne11), unsigned(ne12) * unsigned(ne13)}, {}, {pushConsts});
+    } else {
+        s_algo = komputeManager()->getAlgorithm(__func__);
+        s_algo->setTensors({inA, inB, out});
+        s_algo->setWorkgroup({unsigned((ne01 + 3)/4), unsigned(ne11), unsigned(ne12) * unsigned(ne13)});
+        s_algo->setPushConstants<PushConstants>({pushConsts});
+        s_algo->updateDescriptors(s_kompute_context->pool.get());
+    }
+    seq.record<kp::OpAlgoDispatch>(s_algo);
+}
+
 static void ggml_vk_mul_mat_q6_k(
     kp::Sequence& seq,
     const std::shared_ptr<kp::Tensor>& inA,
@@ -1384,6 +1419,7 @@ static bool ggml_backend_kompute_device_supports_op(ggml_backend_dev_t dev, cons
                 case GGML_TYPE_Q8_0:
                 case GGML_TYPE_Q4_0:
                 case GGML_TYPE_Q4_1:
+                case GGML_TYPE_Q4_K:
                     return true;
                 default:
                     ;
@@ -1635,6 +1671,12 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
                                 ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, r2, r3
                             );
                             break;
+                        case GGML_TYPE_Q4_K:
+                            ggml_vk_mul_mat_q4_k(
+                                seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst,
+                                ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, ne12/ne02, ne13/ne03
+                            );
+                            break;
                         case GGML_TYPE_Q6_K:
                             ggml_vk_mul_mat_q6_k(
                                 seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst,
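A note on the dispatch shape used for the new q4_k path: the shader produces N_DST = 4 output rows per workgroup, so the x dimension is ne01 rounded up to a multiple of 4, y runs over ne11, and z over ne12 * ne13, while r2 = ne12/ne02 and r3 = ne13/ne03 describe how src0 is broadcast over the batch dims. A sketch of that arithmetic with a hypothetical q4_k_workgroups() helper (not part of the actual backend):

#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the workgroup math in ggml_vk_mul_mat_q4_k above.
struct Dispatch { uint32_t x, y, z; };

static Dispatch q4_k_workgroups(int32_t ne01, int32_t ne11, int32_t ne12, int32_t ne13) {
    // x: ne01 rows, 4 rows per workgroup, rounded up; y: ne11; z: batch dims
    return { (uint32_t) ((ne01 + 3) / 4), (uint32_t) ne11, (uint32_t) (ne12 * ne13) };
}

int main() {
    // Example: ne01 = 4096, ne11 = 32, single batch.
    const Dispatch d = q4_k_workgroups(4096, 32, 1, 1);
    printf("workgroups: %u x %u x %u\n", d.x, d.y, d.z);   // 1024 x 32 x 1
    // The r2 = ne12/ne02 and r3 = ne13/ne03 push constants let one src0 batch
    // be reused across several src1 batches inside the shader.
}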

View File

@@ -22102,18 +22102,46 @@ static size_t gguf_type_size(enum gguf_type type) {
     return GGUF_TYPE_SIZE[type];
 }

-static void gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
-    GGML_ASSERT(info->n_dims <= GGML_MAX_DIMS);
-    GGML_ASSERT(0 <= info->type && info->type < GGML_TYPE_COUNT);
+static bool gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
+    if (info->n_dims > GGML_MAX_DIMS) {
+        fprintf(stderr, "%s: invalid number of dimensions (%" PRIu32 ")\n", __func__, info->n_dims);
+        return false;
+    }
+
+    if (info->type < 0 || info->type >= GGML_TYPE_COUNT) {
+        fprintf(stderr, "%s: invalid type (%d)\n", __func__, info->type);
+        return false;
+    }
+
+    if (strlen(info->name.data) >= GGML_MAX_NAME) {
+        fprintf(stderr, "%s: tensor '%s' name is too long\n", __func__, info->name.data);
+        return false;
+    }

     for (uint32_t i = 0; i < info->n_dims; ++i) {
-        GGML_ASSERT(info->ne[i] > 0);
+        if (info->ne[i] <= 0) {
+            fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[i]);
+            return false;
+        }
     }

     // prevent overflow for total number of elements
-    GGML_ASSERT(INT64_MAX/info->ne[1] > info->ne[0]);
-    GGML_ASSERT(INT64_MAX/info->ne[2] > info->ne[0]*info->ne[1]);
-    GGML_ASSERT(INT64_MAX/info->ne[3] > info->ne[0]*info->ne[1]*info->ne[2]);
+    if (INT64_MAX/info->ne[1] <= info->ne[0]) {
+        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[1]);
+        return false;
+    }
+
+    if (INT64_MAX/info->ne[2] <= info->ne[0]*info->ne[1]) {
+        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[2]);
+        return false;
+    }
+
+    if (INT64_MAX/info->ne[3] <= info->ne[0]*info->ne[1]*info->ne[2]) {
+        fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[3]);
+        return false;
+    }
+
+    return true;
 }

 static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
@@ -22414,8 +22442,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
             ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
             ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);

-            // TODO: return an error instead of crashing with GGML_ASSERT
-            gguf_tensor_info_sanitize(info);
+            ok = ok && gguf_tensor_info_sanitize(info);

             // make sure there is no duplicated tensor names
             for (uint64_t j = 0; j < i && ok; ++j) {
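The rewritten sanitize function above guards the element-count product without ever forming it: it treats ne[0]*ne[1] as safe only when ne[0] is strictly below INT64_MAX/ne[1], a slightly conservative but always sufficient condition. A minimal C++ sketch of that guard, with a hypothetical mul_fits_int64() helper:

#include <cstdint>
#include <cstdio>

// Sketch of the overflow guard used in gguf_tensor_info_sanitize above: the
// division is checked first, so the potentially overflowing product is never computed.
static bool mul_fits_int64(int64_t a, int64_t b) {
    return INT64_MAX / b > a;   // same condition the sanitize function rejects on
}

int main() {
    printf("%d\n", mul_fits_int64(1 << 20, 1 << 20));   // 1: ~1e12, fits easily
    printf("%d\n", mul_fits_int64(INT64_MAX / 2, 3));   // 0: product would overflow
}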

View File

@@ -15,6 +15,7 @@
 #define TWOPI_F 6.283185307179586f

 #define QK_K 256
+#define K_SCALE_SIZE 12

 #define u8BufToU16(buf, idx) (((uint16_t(buf[idx + 1]) << 8)) | buf[idx])
 #define u8BufToFloat16(buf, idx) uint16BitsToHalf u8BufToU16(buf, idx)
@@ -64,6 +65,14 @@ mat4 dequantize_q4_1(const block_q4_1 xb, uint il) {
     return reg;
 }

+#define sizeof_block_q4_k 144
+struct block_q4_k {
+    float16_t d;
+    float16_t dmin;
+    uint8_t scales[K_SCALE_SIZE];
+    uint8_t qs[QK_K/2];
+};
+
 #define sizeof_block_q6_k 210
 struct block_q6_k {
     uint8_t ql[QK_K/2]; // quants, lower 4 bits
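The sizeof_block_q4_k constant added above is simply the sum of the member sizes: 2 + 2 + 12 + 128 = 144 bytes (two fp16 scale factors, 12 bytes of packed 6-bit sub-block scales/mins, and 256 quants at 4 bits each). A host-side C++ mirror of the layout that checks the constant; using uint16_t in place of float16_t is an assumption of this sketch:

#include <cstdint>

// Host-side mirror of the GLSL block_q4_k above; uint16_t stands in for float16_t.
constexpr int QK_K         = 256;
constexpr int K_SCALE_SIZE = 12;

struct block_q4_k {
    uint16_t d;                     // super-block scale
    uint16_t dmin;                  // super-block min
    uint8_t  scales[K_SCALE_SIZE];  // packed 6-bit sub-block scales and mins
    uint8_t  qs[QK_K / 2];          // 256 weights at 4 bits each = 128 bytes
};

// 2 + 2 + 12 + 128 = 144, matching sizeof_block_q4_k in common.comp
static_assert(sizeof(block_q4_k) == 144, "unexpected block_q4_k layout");

int main() {}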

View File

@@ -0,0 +1,133 @@
#version 450
#include "common.comp"
#define N_DST 4
#define SIZE_OF_BLOCK sizeof_block_q4_k
layout(local_size_x = 4) in;
layout(local_size_y = 8) in;
layout(local_size_z = 1) in;
layout (binding = 0) readonly buffer tensorInA { block_q4_k inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
layout (push_constant) uniform parameter {
uint inAOff;
uint inBOff;
uint outOff;
int ne00;
int ne10;
int ne0;
int ne1;
int ne01;
int ne02;
int ne12;
int r2;
int r3;
} pcs;
void main() {
const uint16_t kmask1 = uint16_t(0x3f3f);
const uint16_t kmask2 = uint16_t(0x0f0f);
const uint16_t kmask3 = uint16_t(0xc0c0);
const uint ix = gl_SubgroupInvocationID/8; // 0...3
const uint it = gl_SubgroupInvocationID%8; // 0...7
const uint iq = it/4; // 0 or 1
const uint ir = it%4; // 0...3
const uint nb = pcs.ne00/QK_K;
const uint r0 = gl_WorkGroupID.x;
const uint r1 = gl_WorkGroupID.y;
const uint im = gl_WorkGroupID.z;
const uint first_row = r0 * N_DST;
const uint ib_row = first_row * nb;
const uint i12 = im%pcs.ne12;
const uint i13 = im/pcs.ne12;
const uint offset0 = (i12/pcs.r2)*(nb*pcs.ne01) + (i13/pcs.r3)*(nb*pcs.ne01*pcs.ne02);
const uint xblk = ib_row + offset0 + pcs.inAOff;
const uint y = r1*pcs.ne10 + im*pcs.ne00*pcs.ne1 + pcs.inBOff;
float yl[16];
float yh[16];
float sumf[N_DST] = {0.f, 0.f, 0.f, 0.f};
float all_sum = 0.f;
uint y4 = y + ix * QK_K + 64 * iq + 8 * ir;
for (uint ib = ix; ib < nb; ib += 4) {
const uint blk_idx = ib + xblk;
float sumy[4] = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+0] = inB[y4+i+ 0]; sumy[0] += yl[i+0];
yl[i+8] = inB[y4+i+ 32]; sumy[1] += yl[i+8];
yh[i+0] = inB[y4+i+128]; sumy[2] += yh[i+0];
yh[i+8] = inB[y4+i+160]; sumy[3] += yh[i+8];
}
for (int row = 0; row < N_DST; row++) {
uint row_idx = row * nb;
uint16_t sc_0 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 0);
uint16_t sc_1 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 2);
uint16_t sc_2 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 4);
uint16_t sc_3 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 6);
uint16_t sc_4 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 8);
uint16_t sc16[4];
sc16[0] = sc_0 & kmask1;
sc16[1] = sc_2 & kmask1;
sc16[2] = ((sc_4 >> 0) & kmask2) | ((sc_0 & kmask3) >> 2);
sc16[3] = ((sc_4 >> 4) & kmask2) | ((sc_2 & kmask3) >> 2);
float acc1[4] = {0.f, 0.f, 0.f, 0.f};
float acc2[4] = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
uint16_t q1 = u8BufToU16(inA[blk_idx + row_idx].qs, 32 * iq + 8 * ir + i);
uint16_t q2 = u8BufToU16(inA[blk_idx + row_idx].qs, 64 + 32 * iq + 8 * ir + i);
acc1[0] += yl[i+0] * (q1 & 0x000F);
acc1[1] += yl[i+1] * (q1 & 0x0F00);
acc1[2] += yl[i+8] * (q1 & 0x00F0);
acc1[3] += yl[i+9] * (q1 & 0xF000);
acc2[0] += yh[i+0] * (q2 & 0x000F);
acc2[1] += yh[i+1] * (q2 & 0x0F00);
acc2[2] += yh[i+8] * (q2 & 0x00F0);
acc2[3] += yh[i+9] * (q2 & 0xF000);
}
uint8_t sc8_0 = uint8_t(sc16[0] & 0xFF);
uint8_t sc8_1 = uint8_t(sc16[0] >> 8 );
uint8_t sc8_2 = uint8_t(sc16[1] & 0xFF);
uint8_t sc8_3 = uint8_t(sc16[1] >> 8 );
uint8_t sc8_4 = uint8_t(sc16[2] & 0xFF);
uint8_t sc8_5 = uint8_t(sc16[2] >> 8 );
uint8_t sc8_6 = uint8_t(sc16[3] & 0xFF);
uint8_t sc8_7 = uint8_t(sc16[3] >> 8 );
float dall = float(inA[blk_idx + row_idx].d);
float dmin = float(inA[blk_idx + row_idx].dmin);
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8_0 +
(acc1[2] + 1.f/256.f * acc1[3]) * sc8_1 * 1.f/16.f +
(acc2[0] + 1.f/256.f * acc2[1]) * sc8_4 +
(acc2[2] + 1.f/256.f * acc2[3]) * sc8_5 * 1.f/16.f) -
dmin * (sumy[0] * sc8_2 + sumy[1] * sc8_3 + sumy[2] * sc8_6 + sumy[3] * sc8_7);
}
y4 += 4 * QK_K;
}
for (int row = 0; row < N_DST; ++row) {
all_sum = subgroupAdd(sumf[row]);
if (subgroupElect()) {
out_[r1*pcs.ne0 + im*pcs.ne0*pcs.ne1 + first_row + row + pcs.outOff] = all_sum;
}
}
}
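The inner loop of the shader above never shifts the nibbles into place: it multiplies by the masked 16-bit values directly and folds the missing shifts into the 1/256 and 1/16 factors applied after the loop (the low- and high-nibble halves also pick up different sub-block scales). A small C++ sketch showing that the two formulations agree:

#include <cstdint>
#include <cstdio>

// Demonstrates the deferred-shift trick from the q4_k shader above: accumulate
// the masked (still shifted) nibble values, then apply the 1/256 and 1/16
// correction factors once at the end.
int main() {
    // one packed 16-bit load = two q4 bytes = four nibbles
    const uint16_t q  = 0xB7C2;                        // bytes: 0xC2 (low), 0xB7 (high)
    const float y[4]  = {1.5f, -2.0f, 0.25f, 3.0f};    // activations paired with the nibbles

    // reference: extract each nibble explicitly
    float ref = y[0] * ((q >> 0)  & 0xF)    // low  nibble, low  byte
              + y[1] * ((q >> 8)  & 0xF)    // low  nibble, high byte
              + y[2] * ((q >> 4)  & 0xF)    // high nibble, low  byte
              + y[3] * ((q >> 12) & 0xF);   // high nibble, high byte

    // shader-style: mask only, fold the shifts into constant factors afterwards
    float acc0 = y[0] * (q & 0x000F);
    float acc1 = y[1] * (q & 0x0F00);
    float acc2 = y[2] * (q & 0x00F0);
    float acc3 = y[3] * (q & 0xF000);
    float fused = (acc0 + acc1 * (1.f/256.f))
                + (acc2 + acc3 * (1.f/256.f)) * (1.f/16.f);

    printf("ref = %f, fused = %f\n", ref, fused);   // identical
}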

View File

@@ -4273,8 +4273,11 @@ struct llama_model_loader {
         llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
             const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
+            if (tensor_idx < 0) {
+                throw std::runtime_error(format("tensor '%s' not found in the model", name));
+            }
             offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);

             if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
                 throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
             }
@@ -7426,7 +7429,7 @@ static bool llm_load_tensors(
             if (flags & llama_model_loader::TENSOR_NOT_REQUIRED) {
                 return nullptr;
             }

-            throw std::runtime_error(format("missing tensor %s", tn.str().c_str()));
+            throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
         }

         // some models use the token embedding tensor as the output, but since these are used in different layers and with different ops
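The bounds check kept in the first hunk above (offs + ggml_nbytes(tensor) < offs || ... > file->size) relies on unsigned wrap-around: if the addition overflows, the sum comes out smaller than offs. A minimal C++ sketch of that pattern, with a hypothetical tensor_in_bounds() helper:

#include <cstdint>
#include <cstdio>

// Sketch of the check above: with unsigned arithmetic, `offs + nbytes` can wrap
// past zero, in which case `end < offs` catches it; otherwise the range must
// stay within the file size.
static bool tensor_in_bounds(uint64_t offs, uint64_t nbytes, uint64_t file_size) {
    uint64_t end = offs + nbytes;              // may wrap around
    return end >= offs && end <= file_size;    // no wrap, and inside the file
}

int main() {
    printf("%d\n", tensor_in_bounds(100, 50, 200));            // 1: fits
    printf("%d\n", tensor_in_bounds(100, 150, 200));           // 0: past the end of the file
    printf("%d\n", tensor_in_bounds(UINT64_MAX - 8, 32, 200)); // 0: offset + size wraps around
}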