mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-12-27 03:44:35 +00:00
ggml-quants : allow using vdotq_s32 in TQ2_0 vec_dot
Not yet tested on hardware which supports it; it might not work, or might not even compile. But it also might. It should improve performance on recent ARM CPUs. * ggml-quants : remove comment about a possible format change of TQ2_0. Making it slightly more convenient for AVX512 but less convenient for everything else is not worth the trouble.
This commit is contained in:
parent
f034aa1bb1
commit
96b3d411e0
@ -3391,7 +3391,6 @@ void quantize_row_tq2_0_ref(const float * restrict x, block_tq2_0 * restrict y,
|
||||
|
||||
y[i].d = GGML_FP32_TO_FP16(d);
|
||||
|
||||
// TODO: should it be along 64 bytes instead for AVX512?
|
||||
for (size_t j = 0; j < sizeof(y->qs); j += 32) {
|
||||
for (size_t m = 0; m < 32; ++m) {
|
||||
uint8_t q = 0;
|
||||
@ -5957,7 +5956,67 @@ void ggml_vec_dot_tq2_0_q8_K(int n, float * restrict s, size_t bs, const void *
|
||||
|
||||
const int nb = n / QK_K;
|
||||
|
||||
#if defined __ARM_NEON
|
||||
#if defined __ARM_NEON && defined __ARM_FEATURE_DOTPROD
|
||||
float sumf = 0.0f;
|
||||
|
||||
const uint8x16_t m3 = vdupq_n_u8(3);
|
||||
|
||||
for (int i = 0; i < nb; ++i) {
|
||||
int32x4_t sumi0 = vdupq_n_s32(0);
|
||||
int32x4_t sumi1 = vdupq_n_s32(0);
|
||||
|
||||
for (size_t j = 0; j < sizeof(x->qs); j += 32) {
|
||||
uint8x16_t qx0 = vld1q_u8(x[i].qs + j);
|
||||
uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16);
|
||||
uint8x16_t qx2 = vshrq_n_u8(qx0, 2);
|
||||
uint8x16_t qx3 = vshrq_n_u8(qx1, 2);
|
||||
uint8x16_t qx4 = vshrq_n_u8(qx0, 4);
|
||||
uint8x16_t qx5 = vshrq_n_u8(qx1, 4);
|
||||
uint8x16_t qx6 = vshrq_n_u8(qx0, 6);
|
||||
uint8x16_t qx7 = vshrq_n_u8(qx1, 6);
|
||||
|
||||
int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3));
|
||||
int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3));
|
||||
int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3));
|
||||
int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3));
|
||||
int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3));
|
||||
int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3));
|
||||
int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3));
|
||||
int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3));
|
||||
|
||||
const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0);
|
||||
const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16);
|
||||
const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32);
|
||||
const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48);
|
||||
const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64);
|
||||
const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80);
|
||||
const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96);
|
||||
const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112);
|
||||
|
||||
sumi0 = vdotq_s32(sumi0, sqx0, qy0);
|
||||
sumi1 = vdotq_s32(sumi1, sqx1, qy1);
|
||||
sumi0 = vdotq_s32(sumi0, sqx2, qy2);
|
||||
sumi1 = vdotq_s32(sumi1, sqx3, qy3);
|
||||
sumi0 = vdotq_s32(sumi0, sqx4, qy4);
|
||||
sumi1 = vdotq_s32(sumi1, sqx5, qy5);
|
||||
sumi0 = vdotq_s32(sumi0, sqx6, qy6);
|
||||
sumi1 = vdotq_s32(sumi1, sqx7, qy7);
|
||||
}
|
||||
|
||||
const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
|
||||
const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
|
||||
|
||||
sumi0 = vaddq_s32(sumi0, sumi1);
|
||||
sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
|
||||
|
||||
const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
|
||||
|
||||
sumf += d * (float) vaddvq_s32(sumi0);
|
||||
}
|
||||
|
||||
*s = sumf;
|
||||
|
||||
#elif defined __ARM_NEON
|
||||
float sumf = 0.0f;
|
||||
|
||||
const uint8x16_t m3 = vdupq_n_u8(3);
|
||||
|
Loading…
Reference in New Issue
Block a user