mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-24 10:24:35 +00:00)
ggml-backend : fix async copy from CPU (#8897)
* ggml-backend : fix async copy from CPU
* cuda : more reliable async copy, fix stream used when the devices are the same
parent 0478174d59
commit be55695eff
@@ -351,15 +351,10 @@ void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
     }
 
     // an async copy would normally happen after all the queued operations on both backends are completed
-    // sync src, set_async dst
-    if (ggml_backend_buffer_is_host(src->buffer)) {
-        ggml_backend_synchronize(backend_src);
-        ggml_backend_tensor_set_async(backend_dst, dst, src->data, 0, ggml_nbytes(src));
-    } else {
-        ggml_backend_synchronize(backend_src);
-        ggml_backend_tensor_copy(src, dst);
-        ggml_backend_synchronize(backend_dst);
-    }
+    // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
+    ggml_backend_synchronize(backend_src);
+    ggml_backend_synchronize(backend_dst);
+    ggml_backend_tensor_copy(src, dst);
 }
 
 // events
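
For reference, a minimal usage sketch of the semantics this fallback preserves (hypothetical caller code, not part of the commit; the backends and tensors are assumed to be created elsewhere):

#include "ggml-backend.h"

// Sketch: request a cross-backend copy, then wait on the destination backend before
// reading the result. With the fix above, the fallback path synchronizes both backends
// and does a blocking copy, so it observes the same ordering as a true async copy.
static void copy_tensor_between_backends(ggml_backend_t backend_src, ggml_backend_t backend_dst,
                                         struct ggml_tensor * src, struct ggml_tensor * dst) {
    ggml_backend_tensor_copy_async(backend_src, backend_dst, src, dst);
    ggml_backend_synchronize(backend_dst); // dst contents are valid only after this returns
}
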
@@ -1782,7 +1777,17 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
                 } else {
                     ggml_backend_synchronize(split_backend);
                 }
-                ggml_backend_tensor_copy_async(input_backend, split_backend, input, input_cpy);
+                // try async copy, but if not possible, we can still use a sync copy without synchronizing the dst backend, since we handle the synchronization here with multiple copies and events
+                // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
+                if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
+                    ggml_backend_synchronize(input_backend);
+                    if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
+                        ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
+                    } else {
+                        ggml_backend_synchronize(split_backend);
+                    }
+                    ggml_backend_tensor_copy(input, input_cpy);
+                }
             }
         }
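
The TODO above notes that applications cannot reach iface.cpy_tensor_async directly. A hedged sketch of what such a public helper might look like (hypothetical name, only compilable inside ggml-backend.c where the backend interface struct is visible):

// Hypothetical helper: try the destination backend's async copy hook and report whether
// it was used, so the caller can decide how (or whether) to synchronize afterwards.
static bool ggml_backend_try_copy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst,
                                               struct ggml_tensor * src, struct ggml_tensor * dst) {
    return backend_dst->iface.cpy_tensor_async != NULL &&
           backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst);
}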
|
@@ -2358,33 +2358,35 @@ GGML_CALL static void ggml_backend_cuda_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
 }
 
 GGML_CALL static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
-    GGML_ASSERT(ggml_backend_is_cuda(backend_src) || ggml_backend_is_cuda(backend_dst));
-
     ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer;
     ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer;
 
-    if (!ggml_backend_buffer_is_cuda(src->buffer)) {
+    if (!ggml_backend_is_cuda(backend_src) || !ggml_backend_is_cuda(backend_dst)) {
         return false;
     }
 
-    if (!ggml_backend_buffer_is_cuda(dst->buffer)) {
+    if (!ggml_backend_buffer_is_cuda(src->buffer) || !ggml_backend_buffer_is_cuda(dst->buffer)) {
         return false;
     }
 
-    // device -> device
+    // device -> device copy
     ggml_backend_cuda_context * cuda_ctx_src = (ggml_backend_cuda_context *)backend_src->context;
     ggml_backend_cuda_context * cuda_ctx_dst = (ggml_backend_cuda_context *)backend_dst->context;
 
+    ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context;
+    ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context;
+
+    if (cuda_ctx_src->device != buf_ctx_src->device || cuda_ctx_dst->device != buf_ctx_dst->device) {
+#ifndef NDEBUG
+        GGML_CUDA_LOG_WARN("%s: backend and buffer devices do not match\n", __func__);
+#endif
+        return false;
+    }
+
     if (backend_src != backend_dst) {
-        ggml_backend_cuda_buffer_context * buf_ctx_src = (ggml_backend_cuda_buffer_context *)buf_src->context;
-        ggml_backend_cuda_buffer_context * buf_ctx_dst = (ggml_backend_cuda_buffer_context *)buf_dst->context;
-
-        GGML_ASSERT(cuda_ctx_src->device == buf_ctx_src->device);
-        GGML_ASSERT(cuda_ctx_dst->device == buf_ctx_dst->device);
-
         // copy on src stream
         if (cuda_ctx_src->device == cuda_ctx_dst->device) {
-            CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_dst->stream()));
+            CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream()));
         } else {
 #ifdef GGML_CUDA_NO_PEER_COPY
             return false;
@@ -2393,7 +2395,7 @@ GGML_CALL static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
 #endif
         }
 
-        // record event on src stream
+        // record event on src stream after the copy
         if (!cuda_ctx_src->copy_event) {
             ggml_cuda_set_device(cuda_ctx_src->device);
             CUDA_CHECK(cudaEventCreateWithFlags(&cuda_ctx_src->copy_event, cudaEventDisableTiming));
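
The copy event is created lazily, with cudaEventDisableTiming, on the device that will record it. A standalone sketch of the same idea (plain CUDA, hypothetical names, error handling omitted):

#include <cuda_runtime.h>

// Sketch: lazily create a reusable, synchronization-only event.
// cudaEventDisableTiming skips timestamp bookkeeping, which makes record/wait cheaper.
static cudaEvent_t get_copy_event(cudaEvent_t * copy_event, int device) {
    if (*copy_event == NULL) {
        cudaSetDevice(device); // the event belongs to the device that is current at creation time
        cudaEventCreateWithFlags(copy_event, cudaEventDisableTiming);
    }
    return *copy_event;
}
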
@@ -2405,7 +2407,7 @@ GGML_CALL static bool ggml_backend_cuda_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
         CUDA_CHECK(cudaStreamWaitEvent(cuda_ctx_dst->stream(), cuda_ctx_src->copy_event, 0));
     } else {
         // src and dst are on the same backend
-        CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_dst->stream()));
+        CUDA_CHECK(cudaMemcpyAsync(dst->data, src->data, ggml_nbytes(dst), cudaMemcpyDeviceToDevice, cuda_ctx_src->stream()));
     }
     return true;
 }
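
Taken together, the cross-backend path issues the copy on the source backend's stream, records an event on that same stream, and makes the destination stream wait on the event. A plain-CUDA sketch of that ordering (hypothetical helper, not ggml code, error handling omitted):

#include <cuda_runtime.h>

// Sketch: order an async device-to-device copy against a consumer stream.
// An event only orders work queued on the stream it is recorded on, which is why both
// the copy and the cudaEventRecord go on src_stream.
static void copy_then_signal(void * dst, const void * src, size_t nbytes,
                             cudaStream_t src_stream, cudaStream_t dst_stream,
                             cudaEvent_t copy_event) {
    cudaMemcpyAsync(dst, src, nbytes, cudaMemcpyDeviceToDevice, src_stream);
    cudaEventRecord(copy_event, src_stream);        // completes only after the copy above
    cudaStreamWaitEvent(dst_stream, copy_event, 0); // work queued later on dst_stream starts after the copy
}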