cuda : utilize tensor cores with multiple GPU devices

Author: Georgi Gerganov
Date: 2023-10-27 13:05:33 +03:00
Commit: 1a0843c493 (parent: 2f9ec7e271)
Signature: GPG Key ID 449E073F9DC10735 (no known key found for this signature in database)


@@ -6361,7 +6361,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
 
     const int compute_capability = g_compute_capabilities[id];
 
-    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0) && row_diff == src0->ne[1]) {
+    if (compute_capability >= CC_VOLTA && (src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && ggml_is_contiguous(src0)) {
         // convert src0 and src1 to fp16, multiply as fp16, convert dst to fp32
         half * src0_as_f16 = nullptr;
         size_t src0_as = 0;
@@ -6386,7 +6386,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
         const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
 
         size_t dst_as = 0;
-        half * dst_f16 = (half *) ggml_cuda_pool_malloc(row_diff*src1_ncols * sizeof(half), &dst_as);
+        half * dst_f16 = (half *) ggml_cuda_pool_malloc(ldc*src1_ncols * sizeof(half), &dst_as);
 
         const half alpha_f16 = 1.0f;
         const half beta_f16 = 0.0f;
@@ -6402,7 +6402,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
                 CUBLAS_GEMM_DEFAULT_TENSOR_OP));
 
         const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(GGML_TYPE_F16);
-        to_fp32_cuda(dst_f16, dst_dd_i, row_diff*src1_ncols, stream);
+        to_fp32_cuda(dst_f16, dst_dd_i, ldc*src1_ncols, stream);
 
         ggml_cuda_pool_free(dst_f16, dst_as);
 
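
Note on the change: cuBLAS writes the result in column-major order with leading dimension ldc, and judging by the diff, ldc can exceed row_diff when the matrix is split by rows across devices (the main device's output buffer holds more rows than the slice each GPU computes). Sizing the temporary dst_f16 buffer by row_diff under-allocates exactly in that case, which is presumably why the FP16 tensor-core path was previously gated behind row_diff == src0->ne[1]. Allocating and converting ldc*src1_ncols halves makes the path safe for the multi-device case, so the gate can be dropped. Below is a minimal standalone sketch, not part of the commit, of the same cublasGemmEx call shape as in ggml_cuda_op_mul_mat_cublas; all sizes (m, n, k, ldc) are illustrative assumptions standing in for row_diff, src1_ncols, ne10, and ne0.

// Standalone sketch: why the output buffer must be sized by ldc, not by the
// number of computed rows m. cuBLAS stores column j of the column-major
// result at C + j*ldc, so when ldc > m the buffer must hold ldc*n halves
// even though only m*n values are computed.
#include <cstdio>
#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

int main() {
    const int m   = 4; // rows computed on this device (role of row_diff)
    const int n   = 3; // output columns               (role of src1_ncols)
    const int k   = 2; // shared dimension             (role of ne10)
    const int ldc = 6; // output leading dimension     (role of ne0 > row_diff)

    half *A, *B, *C;
    cudaMalloc(&A, k*m*sizeof(half));   // k x m, consumed transposed (CUBLAS_OP_T)
    cudaMalloc(&B, k*n*sizeof(half));   // k x n
    cudaMalloc(&C, ldc*n*sizeof(half)); // ldc*n, NOT m*n -- the point of the fix
    cudaMemset(A, 0, k*m*sizeof(half));
    cudaMemset(B, 0, k*n*sizeof(half));

    cublasHandle_t handle;
    cublasCreate(&handle);

    const half alpha = 1.0f;
    const half beta  = 0.0f;

    // FP16 inputs and output, FP16 compute, tensor-op algorithm (tensor
    // cores on compute capability >= 7.0, i.e. the CC_VOLTA guard in the
    // diff). Rows m..ldc-1 of each column of C are left untouched.
    cublasStatus_t st = cublasGemmEx(handle, CUBLAS_OP_T, CUBLAS_OP_N,
            m, n, k,
            &alpha, A, CUDA_R_16F, k,
                    B, CUDA_R_16F, k,
            &beta,  C, CUDA_R_16F, ldc,
            CUBLAS_COMPUTE_16F,
            CUBLAS_GEMM_DEFAULT_TENSOR_OP);
    cudaDeviceSynchronize();
    printf("gemm status: %d\n", (int) st);

    cublasDestroy(handle);
    cudaFree(A); cudaFree(B); cudaFree(C);
    return 0;
}

With the old row_diff-based size, the same call would write past the end of dst_f16 in every output column whenever ldc > row_diff; sizing by ldc (and converting ldc*src1_ncols elements back to FP32) is what allows the tensor-core path to run on each device's row slice.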