Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-24 18:34:36 +00:00)
k-quants : add AVX support to dot functions (#1916)
* k_quants : add AVX support
* k_quants : apply review comments
parent 412c60e473
commit 5743ca8092
k_quants.c (547 additions, 0 deletions)
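The new __AVX__ branches below mirror the existing AVX2 branches, but split every 256-bit integer operation into a pair of 128-bit SSE operations. Each block keeps two __m128i partial sums, and only the final float accumulation is widened back to 256 bits. A minimal sketch of that shared accumulation step (not code from the commit; assumes the compiler provides _mm256_set_m128i, which older GCC versions may not):

    #include <immintrin.h>

    // Sketch: widen two 128-bit integer partial sums into the 256-bit float
    // accumulator, scaled by the per-block factor d. Each dot product in the
    // diff below ends its block loop with this pattern.
    static inline __m256 accumulate_block(__m256 acc, __m128i sumi_0, __m128i sumi_1, float d) {
        const __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);  // high half, low half
        return _mm256_add_ps(_mm256_mul_ps(_mm256_set1_ps(d), _mm256_cvtepi32_ps(sumi)), acc);
    }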
@@ -1393,6 +1393,112 @@ void ggml_vec_dot_q2_K_q8_K(const int n, float * restrict s, const void * restri
    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(0x3);
    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(0x2);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float dall = y[i].d * ggml_fp16_to_fp32(x[i].d);
        const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);

        const uint8_t * restrict q2 = x[i].qs;
        const int8_t * restrict q8 = y[i].qs;

        // load mins and scales from block_q2_K.scales[QK_K/16]
        const __m128i mins_and_scales = _mm_loadu_si128((const __m128i*)x[i].scales);
        const __m128i scales16 = _mm_and_si128(mins_and_scales, m4);
        const __m128i mins16 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4);
        const __m128i mins_0 = _mm_cvtepi8_epi16(mins16);
        const __m128i mins_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(mins16, mins16));

        // summs = y[i].bsums * (x[i].scales >> 4) in 16bits*8*2 to 32bits*4*2
        const __m128i summs_0 = _mm_madd_epi16(mins_0, _mm_loadu_si128((const __m128i*)&y[i].bsums[0]));
        const __m128i summs_1 = _mm_madd_epi16(mins_1, _mm_loadu_si128((const __m128i*)&y[i].bsums[8]));

        // sumf += -dmin * summs in 32bits*8
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(_mm256_set_m128i(summs_1, summs_0))), acc);

        const __m128i scales_0 = _mm_cvtepi8_epi16(scales16);
        const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales16, scales16));
        const __m128i scales[2] = { scales_0, scales_1 };

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        for (int j = 0; j < QK_K/128; ++j) {

            // load Q8 quants int8*16*8 from block_q8_K.qs[QK_K]
            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            // load 2bits*16*8 from block_q2_K.qs[QK_K/4]
            __m128i q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
            const __m128i q2_0 = _mm_and_si128(q2bits, m3);
            const __m128i q2_2 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
            const __m128i q2_4 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
            const __m128i q2_6 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);
            q2bits = _mm_loadu_si128((const __m128i*)q2); q2 += 16;
            const __m128i q2_1 = _mm_and_si128(q2bits, m3);
            const __m128i q2_3 = _mm_and_si128(_mm_srli_epi16(q2bits, 2), m3);
            const __m128i q2_5 = _mm_and_si128(_mm_srli_epi16(q2bits, 4), m3);
            const __m128i q2_7 = _mm_and_si128(_mm_srli_epi16(q2bits, 6), m3);

            // isuml = q8[l] * ((q2[l] >> shift) & 3) in 8bits*16*8 to 16bits*8*8
            __m128i p0 = _mm_maddubs_epi16(q2_0, q8_0);
            __m128i p1 = _mm_maddubs_epi16(q2_1, q8_1);
            __m128i p2 = _mm_maddubs_epi16(q2_2, q8_2);
            __m128i p3 = _mm_maddubs_epi16(q2_3, q8_3);
            __m128i p4 = _mm_maddubs_epi16(q2_4, q8_4);
            __m128i p5 = _mm_maddubs_epi16(q2_5, q8_5);
            __m128i p6 = _mm_maddubs_epi16(q2_6, q8_6);
            __m128i p7 = _mm_maddubs_epi16(q2_7, q8_7);

            // isum += (x[i].scales[is++] & 0xF) * isuml in 16bits*8*8 to 32bits*4*8
            __m128i shuffle = _mm_set1_epi16(0x0100);
            p0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p0);
            shuffle = _mm_add_epi16(shuffle, m2);
            p1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p1);
            shuffle = _mm_add_epi16(shuffle, m2);
            p2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p2);
            shuffle = _mm_add_epi16(shuffle, m2);
            p3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p3);
            shuffle = _mm_add_epi16(shuffle, m2);
            p4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p4);
            shuffle = _mm_add_epi16(shuffle, m2);
            p5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p5);
            shuffle = _mm_add_epi16(shuffle, m2);
            p6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p6);
            shuffle = _mm_add_epi16(shuffle, m2);
            p7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p7);

            p0 = _mm_add_epi32(p0, p1);
            p2 = _mm_add_epi32(p2, p3);
            p4 = _mm_add_epi32(p4, p5);
            p6 = _mm_add_epi32(p6, p7);

            // isum in 32bits*4*2
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p0, p2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p4, p6));
        }

        // sumf += dall * isum - dmin * summs in 32bits
        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&dall), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);

#else

    float sumf = 0;
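A detail worth calling out in the q2_K hunk above: each 16-bit scale in scales[j] is broadcast to all eight lanes with _mm_shuffle_epi8, using a byte pattern that starts as 0x0100 (select bytes 0 and 1) and is bumped by 2 for the next scale. A small stand-alone sketch of that broadcast with made-up scale values (not from the commit; needs SSSE3 or newer to build):

    #include <immintrin.h>
    #include <stdio.h>

    int main(void) {
        // Eight 16-bit scales in one vector (hypothetical values 10..17).
        const __m128i scales = _mm_setr_epi16(10, 11, 12, 13, 14, 15, 16, 17);

        // Byte pattern {0,1} repeated selects scales[0] in every 16-bit lane;
        // adding 2 to every byte moves the selection to the next scale.
        __m128i shuffle = _mm_set1_epi16(0x0100);
        const __m128i step = _mm_set1_epi8(2);

        for (int k = 0; k < 8; ++k) {
            const __m128i bcast = _mm_shuffle_epi8(scales, shuffle);
            printf("scale %d broadcast to all lanes: %d\n", k, (int16_t)_mm_extract_epi16(bcast, 0));
            shuffle = _mm_add_epi16(shuffle, step);
        }
        return 0;
    }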
@@ -1831,6 +1937,147 @@ void ggml_vec_dot_q3_K_q8_K(const int n, float * restrict s, const void * restri
    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i mone = _mm_set1_epi8(1);
    const __m128i m32 = _mm_set1_epi8(32);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    uint32_t *aux;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);

        const uint8_t * restrict q3 = x[i].qs;
        const int8_t * restrict q8 = y[i].qs;

        // Set up scales
        aux = (uint32_t *)x[i].scales;
        __m128i scales128 = _mm_set_epi32(
                ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4),
                ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4),
                (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4),
                (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4));
        scales128 = _mm_sub_epi8(scales128, m32);
        const __m128i scales_0 = _mm_cvtepi8_epi16(scales128);
        const __m128i scales_1 = _mm_cvtepi8_epi16(_mm_unpackhi_epi64(scales128, scales128));
        const __m128i scales[2] = { scales_0, scales_1 };

        // high bit *128*2 from block_q3_K.hmask[QK_K/8]
        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].hmask[0]);
        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].hmask[16]);

        // integer accumulator
        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        for (int j = 0; j < QK_K/128; ++j) {
            // load low 2 bits *64*2 from block_q3_K.qs[QK_K/4]
            const __m128i q3bits_0 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;
            const __m128i q3bits_1 = _mm_loadu_si128((const __m128i*)q3); q3 += 16;

            // prepare low and high bits
            const int bit = j << 2;
            const __m128i q3l_0 = _mm_and_si128(q3bits_0, m3);
            const __m128i q3l_1 = _mm_and_si128(q3bits_1, m3);
            const __m128i q3h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit)), bit), 2);
            const __m128i q3h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit)), bit), 2);

            const __m128i q3l_2 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 2), m3);
            const __m128i q3l_3 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 2), m3);
            const __m128i q3h_2 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+1)), bit+1), 2);
            const __m128i q3h_3 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+1)), bit+1), 2);

            const __m128i q3l_4 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 4), m3);
            const __m128i q3l_5 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 4), m3);
            const __m128i q3h_4 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+2)), bit+2), 2);
            const __m128i q3h_5 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+2)), bit+2), 2);

            const __m128i q3l_6 = _mm_and_si128(_mm_srli_epi16(q3bits_0, 6), m3);
            const __m128i q3l_7 = _mm_and_si128(_mm_srli_epi16(q3bits_1, 6), m3);
            const __m128i q3h_6 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_0, _mm_slli_epi16(mone, bit+3)), bit+3), 2);
            const __m128i q3h_7 = _mm_slli_epi16(_mm_srli_epi16(_mm_andnot_si128(hbits_1, _mm_slli_epi16(mone, bit+3)), bit+3), 2);

            // load Q8 quants from block_q8_K.qs[QK_K]
            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we can use _mm256_maddubs_epi16,
            // and then subtract. The high bit part has the 2 already subtracted (and so, it is zero if the high bit was not set,
            // and 2 if the high bit was set)
            __m128i q8s_0 = _mm_maddubs_epi16(q3h_0, q8_0);
            __m128i q8s_1 = _mm_maddubs_epi16(q3h_1, q8_1);
            __m128i q8s_2 = _mm_maddubs_epi16(q3h_2, q8_2);
            __m128i q8s_3 = _mm_maddubs_epi16(q3h_3, q8_3);
            __m128i q8s_4 = _mm_maddubs_epi16(q3h_4, q8_4);
            __m128i q8s_5 = _mm_maddubs_epi16(q3h_5, q8_5);
            __m128i q8s_6 = _mm_maddubs_epi16(q3h_6, q8_6);
            __m128i q8s_7 = _mm_maddubs_epi16(q3h_7, q8_7);

            __m128i p16_0 = _mm_maddubs_epi16(q3l_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q3l_1, q8_1);
            __m128i p16_2 = _mm_maddubs_epi16(q3l_2, q8_2);
            __m128i p16_3 = _mm_maddubs_epi16(q3l_3, q8_3);
            __m128i p16_4 = _mm_maddubs_epi16(q3l_4, q8_4);
            __m128i p16_5 = _mm_maddubs_epi16(q3l_5, q8_5);
            __m128i p16_6 = _mm_maddubs_epi16(q3l_6, q8_6);
            __m128i p16_7 = _mm_maddubs_epi16(q3l_7, q8_7);

            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
            p16_7 = _mm_sub_epi16(p16_7, q8s_7);

            // multiply with scales
            __m128i shuffle = _mm_set1_epi16(0x0100);
            p16_0 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_0);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_1 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_1);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_2 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_2);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_3 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_3);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_4 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_4);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_5 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_5);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_6 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_6);
            shuffle = _mm_add_epi16(shuffle, m2);
            p16_7 = _mm_madd_epi16(_mm_shuffle_epi8(scales[j], shuffle), p16_7);

            // accumulate
            p16_0 = _mm_add_epi32(p16_0, p16_1);
            p16_2 = _mm_add_epi32(p16_2, p16_3);
            p16_4 = _mm_add_epi32(p16_4, p16_5);
            p16_6 = _mm_add_epi32(p16_6, p16_7);
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_4, p16_6));

        }

        // multiply with block scale and accumulate
        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);

    }

    *s = hsum_float_8(acc);

#else
    // scalar version
    // This function is written like this so the compiler can manage to vectorize most of it
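On the q3_K trick above: _mm_maddubs_epi16 treats its first operand as unsigned, so the signed 3-bit weight (two low bits plus one high bit, biased by 4) is handled as an unsigned product minus a correction built from the inverted high bit. A scalar paraphrase of one weight's contribution, under my reading of the code (not part of the commit):

    #include <stdint.h>

    // Scalar paraphrase of the per-weight q3_K product: the SIMD path computes
    // q8*low2 with the unsigned multiply and subtracts q8*4 whenever the high
    // bit is clear, which equals q8 * ((low2 | high<<2) - 4).
    static int32_t q3_times_q8(uint8_t low2, uint8_t high1, int8_t q8) {
        const int low_part  = low2 * q8;              // like _mm_maddubs_epi16(q3l, q8)
        const int corr_part = (high1 ? 0 : 4) * q8;   // like _mm_maddubs_epi16(q3h, q8)
        return low_part - corr_part;                  // == ((low2 | (high1 << 2)) - 4) * q8
    }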
@@ -2264,6 +2511,88 @@ void ggml_vec_dot_q4_K_q8_K(const int n, float * restrict s, const void * restri
    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m2 = _mm_set1_epi8(0x2);

    __m256 acc = _mm256_setzero_ps();
    __m128 acc_m = _mm_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
        const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);

        const uint8_t * restrict q4 = x[i].qs;
        const int8_t * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        acc_m = _mm_add_ps(_mm_mul_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod)), acc_m);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_l = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_h = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            __m128i q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_0 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_0 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);
            q4bits = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4l_1 = _mm_and_si128(q4bits, m4);
            const __m128i q4h_1 = _mm_and_si128(_mm_srli_epi16(q4bits, 4), m4);

            const __m128i q8l_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16l = _mm_maddubs_epi16(q4l_0, q8l_0);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_0 = _mm_add_epi32(sumi_0, p16l);
            const __m128i q8l_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16l = _mm_maddubs_epi16(q4l_1, q8l_1);
            p16l = _mm_madd_epi16(scale_l, p16l);
            sumi_1 = _mm_add_epi32(sumi_1, p16l);

            const __m128i q8h_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16h = _mm_maddubs_epi16(q4h_0, q8h_0);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_0 = _mm_add_epi32(sumi_0, p16h);
            const __m128i q8h_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            p16h = _mm_maddubs_epi16(q4h_1, q8h_1);
            p16h = _mm_madd_epi16(scale_h, p16h);
            sumi_1 = _mm_add_epi32(sumi_1, p16h);

        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);

    }

    acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m));
    acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m));

    *s = hsum_float_8(acc) + _mm_cvtss_f32(acc_m);

#else
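The q4_K hunk above keeps the -dmin * sum(min_k * bsum_k) correction in a separate __m128 (acc_m) and only reduces it to a scalar after the block loop, using the movehl/movehdup pair. That 4-lane horizontal sum in isolation, as a sketch (a paraphrase, not code from the commit):

    #include <immintrin.h>

    // Horizontal sum of the four floats in v, mirroring how acc_m is reduced
    // after the q4_K block loop above (requires SSE3 for _mm_movehdup_ps).
    static inline float hsum_ps_sse3(__m128 v) {
        v = _mm_add_ps(v, _mm_movehl_ps(v, v));   // lanes {0,1} += lanes {2,3}
        v = _mm_add_ss(v, _mm_movehdup_ps(v));    // lane 0 += lane 1
        return _mm_cvtss_f32(v);
    }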
@@ -2679,6 +3008,106 @@ void ggml_vec_dot_q5_K_q8_K(const int n, float * restrict s, const void * restri
    *s = hsum_float_8(acc) + summs;

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i mzero = _mm_setzero_si128();
    const __m128i mone = _mm_set1_epi8(1);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    float summs = 0.f;

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);
        const float dmin = -y[i].d * ggml_fp16_to_fp32(x[i].dmin);

        const uint8_t * restrict q5 = x[i].qs;
        const int8_t * restrict q8 = y[i].qs;

        memcpy(utmp, x[i].scales, 12);
        utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
        const uint32_t uaux = utmp[1] & kmask1;
        utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
        utmp[2] = uaux;
        utmp[0] &= kmask1;

        const __m128i utmps = _mm_set_epi32(utmp[3], utmp[2], utmp[1], utmp[0]);
        const __m128i scales = _mm_cvtepu8_epi16(utmps);
        const __m128i mins = _mm_cvtepu8_epi16(_mm_unpackhi_epi64(utmps, utmps));

        const __m128i q8sums_0 = _mm_loadu_si128((const __m128i*)&y[i].bsums[0]);
        const __m128i q8sums_1 = _mm_loadu_si128((const __m128i*)&y[i].bsums[8]);
        const __m128i q8s = _mm_hadd_epi16(q8sums_0, q8sums_1);
        const __m128i prod = _mm_madd_epi16(mins, q8s);
        const __m128i hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero);
        summs += dmin * _mm_extract_epi32(hsum, 0);

        const __m128i hbits_0 = _mm_loadu_si128((const __m128i*)&x[i].qh[0]);
        const __m128i hbits_1 = _mm_loadu_si128((const __m128i*)&x[i].qh[16]);
        __m128i hmask = mone;

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        int bit = 0;

        __m128i shuffle = _mm_set1_epi16(0x0100);
        for (int j = 0; j < QK_K/64; ++j) {

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi16(shuffle, m2);

            const __m128i q5bits_0 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;
            const __m128i q5bits_1 = _mm_loadu_si128((const __m128i*)q5); q5 += 16;

            __m128i q5l_0 = _mm_and_si128(q5bits_0, m4);
            __m128i q5l_1 = _mm_and_si128(q5bits_1, m4);
            __m128i q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            __m128i q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            __m128i q5_0 = _mm_add_epi8(q5l_0, q5h_0);
            __m128i q5_1 = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_0 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_0 = _mm_madd_epi16(scale_0, p16_0);
            p16_1 = _mm_madd_epi16(scale_0, p16_1);

            q5l_0 = _mm_and_si128(_mm_srli_epi16(q5bits_0, 4), m4);
            q5l_1 = _mm_and_si128(_mm_srli_epi16(q5bits_1, 4), m4);
            q5h_0 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_0, hmask), bit), 4);
            q5h_1 = _mm_slli_epi16(_mm_srli_epi16(_mm_and_si128(hbits_1, hmask), bit++), 4);
            q5_0 = _mm_add_epi8(q5l_0, q5h_0);
            q5_1 = _mm_add_epi8(q5l_1, q5h_1);
            hmask = _mm_slli_epi16(hmask, 1);

            q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            __m128i p16_2 = _mm_maddubs_epi16(q5_0, q8_0);
            __m128i p16_3 = _mm_maddubs_epi16(q5_1, q8_1);
            p16_2 = _mm_madd_epi16(scale_1, p16_2);
            p16_3 = _mm_madd_epi16(scale_1, p16_3);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));

        }

        __m256 vd = _mm256_set1_ps(d);
        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(vd, _mm256_cvtepi32_ps(sumi)), acc);

    }

    *s = hsum_float_8(acc) + summs;

#else

    const uint8_t * scales = (const uint8_t*)&utmp[0];
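In the q5_K hunk above, the fifth bit of every weight comes from x[i].qh, which is loaded once per block; the loop then walks the eight bit planes with a byte mask that starts at 1 and is shifted left once per 32 weights, paired with a matching scalar shift count. A scalar paraphrase of how one 5-bit value is reassembled (my reading, not from the commit):

    #include <stdint.h>

    // Scalar paraphrase of the q5_K reassembly: 4 low bits come from the packed
    // nibble, the 5th bit from the qh byte, selected by a mask that advances by
    // one bit position for every 32 weights.
    static uint8_t q5_value(uint8_t nibble, uint8_t qh_byte, int plane) {
        const uint8_t hmask = (uint8_t)(1u << plane);                   // hmask <<= 1 per plane in the SIMD code
        const uint8_t high  = (uint8_t)(((qh_byte & hmask) >> plane) << 4);
        return (uint8_t)(nibble + high);                                // value in [0, 31]
    }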
@@ -3130,6 +3559,124 @@ void ggml_vec_dot_q6_K_q8_K(const int n, float * restrict s, const void * restri
    *s = hsum_float_8(acc);

#elif defined __AVX__

    const __m128i m4 = _mm_set1_epi8(0xF);
    const __m128i m3 = _mm_set1_epi8(3);
    const __m128i m32s = _mm_set1_epi8(32);
    const __m128i m2 = _mm_set1_epi8(2);

    __m256 acc = _mm256_setzero_ps();

    for (int i = 0; i < nb; ++i) {

        const float d = y[i].d * ggml_fp16_to_fp32(x[i].d);

        const uint8_t * restrict q4 = x[i].ql;
        const uint8_t * restrict qh = x[i].qh;
        const int8_t * restrict q8 = y[i].qs;

        const __m128i scales = _mm_loadu_si128((const __m128i*)x[i].scales);

        __m128i sumi_0 = _mm_setzero_si128();
        __m128i sumi_1 = _mm_setzero_si128();

        __m128i shuffle = _mm_set_epi64x(0x0101010101010101, 0x0000000000000000);
        for (int j = 0; j < QK_K/128; ++j) {

            const __m128i q4bitsH_0 = _mm_loadu_si128((const __m128i*)qh); qh += 16;
            const __m128i q4bitsH_1 = _mm_loadu_si128((const __m128i*)qh); qh += 16;

            const __m128i q4h_0 = _mm_slli_epi16(_mm_and_si128(q4bitsH_0, m3), 4);
            const __m128i q4h_1 = _mm_slli_epi16(_mm_and_si128(q4bitsH_1, m3), 4);
            const __m128i q4h_2 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 2), m3), 4);
            const __m128i q4h_3 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 2), m3), 4);
            const __m128i q4h_4 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 4), m3), 4);
            const __m128i q4h_5 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 4), m3), 4);
            const __m128i q4h_6 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_0, 6), m3), 4);
            const __m128i q4h_7 = _mm_slli_epi16(_mm_and_si128(_mm_srli_epi16(q4bitsH_1, 6), m3), 4);

            const __m128i q4bits1_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits1_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_0 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;
            const __m128i q4bits2_1 = _mm_loadu_si128((const __m128i*)q4); q4 += 16;

            const __m128i q4_0 = _mm_or_si128(_mm_and_si128(q4bits1_0, m4), q4h_0);
            const __m128i q4_1 = _mm_or_si128(_mm_and_si128(q4bits1_1, m4), q4h_1);
            const __m128i q4_2 = _mm_or_si128(_mm_and_si128(q4bits2_0, m4), q4h_2);
            const __m128i q4_3 = _mm_or_si128(_mm_and_si128(q4bits2_1, m4), q4h_3);
            const __m128i q4_4 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_0, 4), m4), q4h_4);
            const __m128i q4_5 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits1_1, 4), m4), q4h_5);
            const __m128i q4_6 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_0, 4), m4), q4h_6);
            const __m128i q4_7 = _mm_or_si128(_mm_and_si128(_mm_srli_epi16(q4bits2_1, 4), m4), q4h_7);

            const __m128i q8_0 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_1 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_2 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_3 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_4 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_5 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_6 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;
            const __m128i q8_7 = _mm_loadu_si128((const __m128i*)q8); q8 += 16;

            __m128i q8s_0 = _mm_maddubs_epi16(m32s, q8_0);
            __m128i q8s_1 = _mm_maddubs_epi16(m32s, q8_1);
            __m128i q8s_2 = _mm_maddubs_epi16(m32s, q8_2);
            __m128i q8s_3 = _mm_maddubs_epi16(m32s, q8_3);
            __m128i q8s_4 = _mm_maddubs_epi16(m32s, q8_4);
            __m128i q8s_5 = _mm_maddubs_epi16(m32s, q8_5);
            __m128i q8s_6 = _mm_maddubs_epi16(m32s, q8_6);
            __m128i q8s_7 = _mm_maddubs_epi16(m32s, q8_7);

            __m128i p16_0 = _mm_maddubs_epi16(q4_0, q8_0);
            __m128i p16_1 = _mm_maddubs_epi16(q4_1, q8_1);
            __m128i p16_2 = _mm_maddubs_epi16(q4_2, q8_2);
            __m128i p16_3 = _mm_maddubs_epi16(q4_3, q8_3);
            __m128i p16_4 = _mm_maddubs_epi16(q4_4, q8_4);
            __m128i p16_5 = _mm_maddubs_epi16(q4_5, q8_5);
            __m128i p16_6 = _mm_maddubs_epi16(q4_6, q8_6);
            __m128i p16_7 = _mm_maddubs_epi16(q4_7, q8_7);

            p16_0 = _mm_sub_epi16(p16_0, q8s_0);
            p16_1 = _mm_sub_epi16(p16_1, q8s_1);
            p16_2 = _mm_sub_epi16(p16_2, q8s_2);
            p16_3 = _mm_sub_epi16(p16_3, q8s_3);
            p16_4 = _mm_sub_epi16(p16_4, q8s_4);
            p16_5 = _mm_sub_epi16(p16_5, q8s_5);
            p16_6 = _mm_sub_epi16(p16_6, q8s_6);
            p16_7 = _mm_sub_epi16(p16_7, q8s_7);

            const __m128i scale_0 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_1 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_2 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);
            const __m128i scale_3 = _mm_shuffle_epi8(scales, shuffle);
            shuffle = _mm_add_epi8(shuffle, m2);

            p16_0 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_0), p16_0);
            p16_1 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_0, scale_0)), p16_1);
            p16_2 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_1), p16_2);
            p16_3 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_1, scale_1)), p16_3);
            p16_4 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_2), p16_4);
            p16_5 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_2, scale_2)), p16_5);
            p16_6 = _mm_madd_epi16(_mm_cvtepi8_epi16(scale_3), p16_6);
            p16_7 = _mm_madd_epi16(_mm_cvtepi8_epi16(_mm_unpackhi_epi64(scale_3, scale_3)), p16_7);

            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_0, p16_2));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_1, p16_3));
            sumi_0 = _mm_add_epi32(sumi_0, _mm_add_epi32(p16_4, p16_6));
            sumi_1 = _mm_add_epi32(sumi_1, _mm_add_epi32(p16_5, p16_7));

        }

        __m256i sumi = _mm256_set_m128i(sumi_1, sumi_0);
        acc = _mm256_add_ps(_mm256_mul_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi)), acc);
    }

    *s = hsum_float_8(acc);

#else

    int8_t aux8[QK_K];
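And on the q6_K hunk above: each weight is first widened to an unsigned value in [0, 63] (4 bits from ql, 2 bits from qh shifted into place), and the signed range is restored after the unsigned multiply by subtracting the _mm_maddubs_epi16(m32s, q8) term, i.e. 32 * q8. A scalar paraphrase (my reading, not from the commit):

    #include <stdint.h>

    // Scalar paraphrase of the per-weight q6_K product: combine low and high
    // bits, multiply as unsigned, then remove the 32*q8 bias term.
    static int32_t q6_times_q8(uint8_t low4, uint8_t high2, int8_t q8) {
        const uint8_t q6 = (uint8_t)(low4 | (high2 << 4));   // unsigned value in [0, 63]
        return q6 * q8 - 32 * q8;                            // == (q6 - 32) * q8
    }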