CLBlast: Add outer loops over src0 for broadcasting in mulmat

Reduce repeated dequantization of the same data.
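
The gain comes from the iteration order. Below is a minimal, self-contained sketch (not the ggml code itself) of the old and new loop structures, assuming the broadcast ratios r2 = ne12 / ne02 and r3 = ne13 / ne03 that these mul_mat functions compute, with made-up batch sizes; printf stands in for the copy/dequantize and GEMM work.

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t ne02 = 2, ne03 = 1;   // illustrative src0 batch sizes
    const int64_t ne12 = 6, ne13 = 1;   // src1: three matrices broadcast onto each src0 matrix
    const int64_t r2 = ne12 / ne02, r3 = ne13 / ne03;

    // Before: loop over src1's batch dims and derive src0's indices by division.
    // In the quantized path the dequantize kernel ran in this body, i.e. once
    // per src1 matrix, even when several of them share the same src0 matrix.
    for (int64_t i13 = 0; i13 < ne13; i13++) {
        int64_t i03 = i13 / r3;
        for (int64_t i12 = 0; i12 < ne12; i12++) {
            int64_t i02 = i12 / r2;
            printf("old: prepare src0[%lld,%lld], multiply with src1[%lld,%lld]\n",
                   (long long) i03, (long long) i02, (long long) i13, (long long) i12);
        }
    }

    // After: loop over src0's batch dims first, prepare each src0 matrix once,
    // then visit the r2 src1 matrices that map onto it (the TODO in the diff
    // would additionally hoist the copy above the i13 loop when r3 > 1).
    for (int64_t i03 = 0; i03 < ne03; i03++) {
        for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
            for (int64_t i02 = 0; i02 < ne02; i02++) {
                printf("new: prepare src0[%lld,%lld] once\n", (long long) i03, (long long) i02);
                for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
                    printf("new:   multiply with src1[%lld,%lld]\n", (long long) i13, (long long) i12);
                }
            }
        }
    }
    return 0;
}

With ne12/ne02 = 3 as above, the old order dequantizes each quantized src0 matrix three times in the CLBlast path; the new order does it once per src0 matrix.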
Author: shibe2
Date:   2023-10-12 16:01:23 +04:00
Parent: d1031cf49c
Commit: 465219b914

ggml-opencl.cpp

@@ -1489,23 +1489,20 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
 cl_mem d_D = ggml_cl_pool_malloc(sizeof(float) * d_ne, &d_size);
 size_t x_offset = 0;
-int64_t pi02 = -1;
-int64_t pi03 = -1;
-for (int64_t i13 = 0; i13 < ne13; i13++) {
-int64_t i03 = i13 / r3;
-for (int64_t i12 = 0; i12 < ne12; i12++) {
-int64_t i02 = i12 / r2;
-// copy data to device
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+// TODO: copy src0 here when r3>1
+for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
 if (src0->backend == GGML_BACKEND_GPU) {
 x_offset = (i03 * ne02 + i02) * x_ne;
-} else if (i02 != pi02 || i03 != pi03) {
+} else {
+// copy src0 to device
 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
-pi02 = i02;
-pi03 = i03;
 }
+for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
+// copy src1 to device
 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
 CL_CHECK(clFinish(queue));
@@ -1531,6 +1528,8 @@ static void ggml_cl_mul_mat_f32(const ggml_tensor * src0, const ggml_tensor * sr
 CL_CHECK(clEnqueueReadBuffer(queue, d_D, true, 0, sizeof(float) * d_ne, d, 1, &ev_sgemm, NULL));
 }
 }
+}
+}
 if (src0->backend != GGML_BACKEND_GPU) {
 ggml_cl_pool_free(d_X, x_size);
@@ -1589,24 +1588,19 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
 bool src1_cont_cols = (size_t)nb11 == ne11*sizeof(float);
 size_t x_offset = 0;
-int64_t pi02 = -1;
-int64_t pi03 = -1;
-for (int64_t i13 = 0; i13 < ne13; i13++) {
-int64_t i03 = i13 / r3;
-for (int64_t i12 = 0; i12 < ne12; i12++) {
-int64_t i02 = i12 / r2;
-// copy src0 to device
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+// TODO: copy src0 here when r3>1
+for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
 if (src0->backend == GGML_BACKEND_GPU) {
 x_offset = (i03 * ne02 + i02) * x_ne;
-} else if (i02 != pi02 || i03 != pi03) {
+} else {
+// copy src0 to device
 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_X, 0, src0, i03, i02, NULL));
-pi02 = i02;
-pi03 = i03;
 }
+for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
 // convert src1 to fp16
 // TODO: use multiple threads
 char * src1i = (char *) src1->data + i13*nb13 + i12*nb12;
@@ -1658,6 +1652,8 @@ static void ggml_cl_mul_mat_f16(const ggml_tensor * src0, const ggml_tensor * sr
 ggml_fp16_to_fp32_row(tmp, d, d_ne);
 }
 }
+}
+}
 if (src0->backend != GGML_BACKEND_GPU) {
 ggml_cl_pool_free(d_X, x_size);
@@ -1718,28 +1714,30 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
 size_t ev_idx = 0;
 std::vector<cl_event> events;
-int64_t pi02 = -1;
-int64_t pi03 = -1;
-for (int64_t i13 = 0; i13 < ne13; i13++) {
-int64_t i03 = i13 / r3;
-for (int64_t i12 = 0; i12 < ne12; i12++) {
-int64_t i02 = i12 / r2;
+for (int64_t i03 = 0; i03 < ne03; i03++) {
+// TODO: copy and dequantize src0 here when r3>1
+for (int64_t i13 = i03 * r3, e13 = i13 + r3; i13 < e13; i13++) {
+for (int64_t i02 = 0; i02 < ne02; i02++) {
 // copy src0 to device if necessary
 if (src0->backend == GGML_BACKEND_CPU) {
-if (i02 != pi02 || i03 != pi03) {
 events.emplace_back();
 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Q, 0, src0, i03, i02, events.data() + ev_idx++));
-pi02 = i02;
-pi03 = i03;
-}
 } else if (src0->backend == GGML_BACKEND_GPU) {
 d_Q = (cl_mem) src0->extra;
 } else {
 GGML_ASSERT(false);
 }
+if (!mul_mat_vec) {
+// convert src0 to fp32 on device
+const size_t global = x_ne / global_denom;
+const size_t offset = src0->backend == GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0;
+CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
+CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
+CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, &offset, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));
+}
+for (int64_t i12 = i02 * r2, e12 = i12 + r2; i12 < e12; i12++) {
 if (mul_mat_vec) { // specialized dequantize_mul_mat_vec kernel
 // copy src1 to device
 events.emplace_back();
@@ -1756,23 +1754,15 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
 CL_CHECK(clSetKernelArg(*dmmv, 3, sizeof(cl_mem), &d_D));
 CL_CHECK(clSetKernelArg(*dmmv, 4, sizeof(cl_int), &ncols));
 CL_CHECK(clEnqueueNDRangeKernel(queue, *dmmv, 1, &offset, &global, &local, events.size() - 1, events.data(), events.data() + ev_idx++));
-} else { // general dequantization kernel + CLBlast matrix matrix multiplication
-// convert src0 to fp32 on device
-const size_t global = x_ne / global_denom;
-const size_t offset = src0->backend == GGML_BACKEND_GPU ? (i03 * ne02 + i02) * x_bps : 0;
-CL_CHECK(clSetKernelArg(*to_fp32_cl, 0, sizeof(cl_mem), &d_Q));
-CL_CHECK(clSetKernelArg(*to_fp32_cl, 1, sizeof(cl_mem), &d_X));
-CL_CHECK(clEnqueueNDRangeKernel(queue, *to_fp32_cl, 1, offset > 0 ? &offset : NULL, &global, local > 0 ? &local : NULL, events.size(), !events.empty() ? events.data() : NULL, NULL));
+} else { // CLBlast matrix matrix multiplication
 // copy src1 to device
 CL_CHECK(ggml_cl_h2d_tensor_2d(queue, d_Y, 0, src1, i13, i12, NULL));
-events.emplace_back();
 // wait for conversion
 CL_CHECK(clFinish(queue));
 // compute
+events.emplace_back();
 clblast::StatusCode status = clblast::Gemm<cl_float>(clblast::Layout::kColMajor,
 clblast::Transpose::kYes, clblast::Transpose::kNo,
 ne01, ne11, ne10,
@@ -1799,6 +1789,8 @@ static void ggml_cl_mul_mat_q_f32(const ggml_tensor * src0, const ggml_tensor *
 events.clear();
 }
 }
+}
+}
 if (!mul_mat_vec) {
 ggml_cl_pool_free(d_X, x_size);