#include "mmq.cuh"

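// dispatch the quantized matrix multiplication (MMQ) kernel matching the quantization type of src0;
// src1 is expected to already be quantized to q8_1 blocks in src1_ddq_i and the result is written to dst_dd_i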
void ggml_cuda_op_mul_mat_q(
    ggml_backend_cuda_context & ctx,
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
    const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
    const int64_t src1_padded_row_size, cudaStream_t stream) {

    const int64_t ne00 = src0->ne[0];

    const int64_t nb01 = src0->nb[1];

    const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
    GGML_ASSERT(ne10 % QK8_1 == 0);

    const int64_t ne0 = dst->ne[0];

    const int64_t row_diff = row_high - row_low;
    const int64_t stride00 = nb01 / ggml_type_size(src0->type);

    int id = ggml_cuda_get_device();
    const int compute_capability = ggml_cuda_info().devices[id].cc;

    // the main device has a larger memory buffer to hold the results from all GPUs
    // nrows_dst == nrows of the matrix that the kernel writes into
    const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;

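    // kernel launch arguments shared by all quantization types below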
    const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst};

    switch (src0->type) {
        case GGML_TYPE_Q4_0:
            mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_1:
            mul_mat_q_case<GGML_TYPE_Q4_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_0:
            mul_mat_q_case<GGML_TYPE_Q5_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_1:
            mul_mat_q_case<GGML_TYPE_Q5_1>(ctx, args, stream);
            break;
        case GGML_TYPE_Q8_0:
            mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
            break;
        case GGML_TYPE_Q2_K:
            mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q3_K:
            mul_mat_q_case<GGML_TYPE_Q3_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q4_K:
            mul_mat_q_case<GGML_TYPE_Q4_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q5_K:
            mul_mat_q_case<GGML_TYPE_Q5_K>(ctx, args, stream);
            break;
        case GGML_TYPE_Q6_K:
            mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
            break;
        default:
            GGML_ASSERT(false);
            break;
    }

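    // suppress potential unused-parameter warnings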
    GGML_UNUSED(src1);
    GGML_UNUSED(dst);
    GGML_UNUSED(src1_ddf_i);
}

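// decide whether the MMQ kernels should be used for this matrix multiplication instead of the
// cuBLAS-based path, depending on the quantization type, the device compute capability cc and the batch size ne11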
bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
#ifdef GGML_CUDA_FORCE_CUBLAS
    return false;
#endif // GGML_CUDA_FORCE_CUBLAS

    bool mmq_supported;

    switch (type) {
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
        case GGML_TYPE_Q2_K:
        case GGML_TYPE_Q3_K:
        case GGML_TYPE_Q4_K:
        case GGML_TYPE_Q5_K:
        case GGML_TYPE_Q6_K:
            mmq_supported = true;
            break;
        default:
            mmq_supported = false;
            break;
    }

    if (!mmq_supported) {
        return false;
    }

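    // if int8 MMA (tensor core) instructions are available, MMQ is always used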
    if (int8_mma_available(cc)) {
        return true;
    }

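    // the MMQ kernels need at least DP4A support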
    if (cc < MIN_CC_DP4A) {
        return false;
    }

#ifdef GGML_CUDA_FORCE_MMQ
    return true;
#endif // GGML_CUDA_FORCE_MMQ

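    // NVIDIA (cc < CC_OFFSET_AMD): use MMQ unconditionally before Volta, afterwards only for small batch sizes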
    if (cc < CC_OFFSET_AMD) {
        return cc < CC_VOLTA || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
    }

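    // AMD: use MMQ unconditionally before RDNA3, afterwards only for small batch sizes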
    return cc < CC_RDNA3 || ne11 < MMQ_DP4A_MAX_BATCH_SIZE;
}