mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-26 11:24:35 +00:00)

Commit: Addressed comments
This commit is contained in: parent c3d4ead136 / commit d403b180a6

Changed file: ggml-cuda.cu (127 changed lines)
@@ -2411,19 +2411,19 @@ GGML_CALL static void ggml_backend_cuda_synchronize(ggml_backend_t backend) {

 #ifdef USE_CUDA_GRAPH
 #define MAX_NODES_IN_CUDA_GRAPH 10000
-struct ggml_cudaGraph {
+struct ggml_cuda_graph {
     int count = 0;
     cudaGraph_t graph = nullptr;
     cudaGraphExec_t instance = nullptr;
-    size_t numNodes = 0;
+    size_t num_nodes = 0;
     int softmax_ne0 = 0;
     cudaGraphNode_t nodes[MAX_NODES_IN_CUDA_GRAPH];
     cudaKernelNodeParams params[MAX_NODES_IN_CUDA_GRAPH];
-    bool disableDueToGpuArch=false;
+    bool disable_due_to_gpu_arch = false;
 };
 #endif

-const bool disableCudaGraphs = (getenv("LLAMACPP_DISABLE_CUDA_GRAPHS") != nullptr);
+const bool disable_cuda_graphs = (getenv("LLAMACPP_DISABLE_CUDA_GRAPHS") != nullptr);

 GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
     ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
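For readers of the patch, the struct above is the persistent per-backend CUDA graph state: the captured cudaGraph_t, its executable cudaGraphExec_t instance, and the node/parameter arrays survive across calls, so a graph captured once can be relaunched on later tokens. Below is a minimal, self-contained sketch (not part of this commit) of the capture/instantiate/launch lifecycle that this state supports; the scale kernel, the sizes, and the omission of error checking are illustrative only.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float * x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1024;
    float * d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    cudaGraph_t     graph    = nullptr;
    cudaGraphExec_t instance = nullptr;

    // Capture the work issued on the stream into a graph (one-off cost).
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    scale<<<(n + 255) / 256, 256, 0, stream>>>(d_x, 2.0f, n);
    cudaStreamEndCapture(stream, &graph);

    // Instantiate once, then relaunch the whole graph cheaply on later steps.
    cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
    for (int step = 0; step < 10; ++step) {
        cudaGraphLaunch(instance, stream);
    }
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(instance);
    cudaGraphDestroy(graph);
    cudaFree(d_x);
    return 0;
}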
@@ -2432,33 +2432,29 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t

 #ifdef USE_CUDA_GRAPH
     // Objects required for CUDA Graph
-    static ggml_cudaGraph cudaGraph;
-    bool useCudaGraph = (cudaGraph.count>=7); //avoid CUDA graphs on first few steps due to incompatible initialisations.
-    char** updatedKernelArg[MAX_NODES_IN_CUDA_GRAPH];
-    bool cudaGraphUpdateRequired = false;
+    static ggml_cuda_graph cuda_graph;
+    bool use_cuda_graph = (cuda_graph.count >= 7); //avoid CUDA graphs on first few steps due to incompatible initialisations.
+    char ** updated_kernel_arg[MAX_NODES_IN_CUDA_GRAPH];
+    bool cuda_graph_update_required = false;
     // pointer to CUDA cpy kernel, which is required to identify
     // kernel parameters which need updated in the graph for each token
-    void* ggmlCudaCpyFn = nullptr;
+    void * ggml_cuda_cpy_fn_ptr = nullptr;

-    if(cudaGraph.count==0){
-        cudaDeviceProp prop;
-        int device;
-        CUDA_CHECK(cudaGetDevice(&device));
-        CUDA_CHECK(cudaGetDeviceProperties(&prop, device));
-        if (prop.major < 8){
-            cudaGraph.disableDueToGpuArch=true;
+    if(cuda_graph.count == 0){
+        if (ggml_cuda_info().devices[cuda_ctx->device].cc < 800){
+            cuda_graph.disable_due_to_gpu_arch=true;
         }
     }

     // Disable CUDA graphs in presence of env var or old GPU.
     // Also disable for multi-gpu for now. TO DO investigate
-    if(disableCudaGraphs || cudaGraph.disableDueToGpuArch || ggml_backend_cuda_get_device_count() > 1){
-        useCudaGraph = false;
+    if(disable_cuda_graphs || cuda_graph.disable_due_to_gpu_arch || ggml_backend_cuda_get_device_count() > 1){
+        use_cuda_graph = false;
     }

-    if(useCudaGraph) {
+    if(use_cuda_graph) {

-        if(cudaGraph.instance == nullptr) cudaGraphUpdateRequired=true;
+        if(cuda_graph.instance == nullptr) cuda_graph_update_required=true;

         // Loop over nodes in GGML graph to obtain info needed for CUDA graph
         int k=0;
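A note on the replaced architecture check: instead of querying cudaDeviceProp on the first call, the new code reads the compute capability that ggml caches per device in ggml_cuda_info(). Assuming ggml's convention of encoding compute capability as 100*major + 10*minor, cc < 800 corresponds to the old prop.major < 8 test (pre-Ampere). Below is a standalone sketch of an equivalent check, with a hypothetical device_cc() helper and the same environment-variable override; it is not code from the patch.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical helper: compute capability encoded as 100*major + 10*minor (assumption).
static int device_cc(int device) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return 100 * prop.major + 10 * prop.minor;
}

int main() {
    int device = 0;
    cudaGetDevice(&device);

    const bool disable_due_to_gpu_arch = device_cc(device) < 800;  // pre-Ampere GPUs
    const bool disable_cuda_graphs     = getenv("LLAMACPP_DISABLE_CUDA_GRAPHS") != nullptr;

    printf("CUDA graphs %s\n",
           (disable_due_to_gpu_arch || disable_cuda_graphs) ? "disabled" : "enabled");
    return 0;
}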
@@ -2468,36 +2464,36 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
             // (identified by inspecting soft max op parameters)
             if(node->op == GGML_OP_SOFT_MAX) {
                 if(node->src[1]->ne[1] > 1){
-                    useCudaGraph = false; // disable CUDA graphs for batch size > 1 for now. TO DO investigate
+                    use_cuda_graph = false; // disable CUDA graphs for batch size > 1 for now. TO DO investigate
                 }
-                if(node->src[0]->ne[0] != cudaGraph.softmax_ne0) {
-                    cudaGraphUpdateRequired = true;
-                    cudaGraph.softmax_ne0 = node->src[0]->ne[0];
+                if(node->src[0]->ne[0] != cuda_graph.softmax_ne0) {
+                    cuda_graph_update_required = true;
+                    cuda_graph.softmax_ne0 = node->src[0]->ne[0];
                 }
             }
             if(node->op == GGML_OP_CPY) {
                 // store the copy op parameter which changes with each token.
-                updatedKernelArg[k++]=(char**) &(node->src[1]->data);
-                if(ggmlCudaCpyFn == nullptr){
+                updated_kernel_arg[k++]=(char **) &(node->src[1]->data);
+                if(ggml_cuda_cpy_fn_ptr == nullptr){
                     // store a pointer to the copy op CUDA kernel to identify it later
-                    ggmlCudaCpyFn = ggml_cuda_cpy_fn(node->src[0], node->src[1]);
+                    ggml_cuda_cpy_fn_ptr = ggml_cuda_cpy_fn(node->src[0], node->src[1]);
                 }
             }
         }
     }

-    if(useCudaGraph && cudaGraphUpdateRequired) { // Start CUDA graph capture
+    if(use_cuda_graph && cuda_graph_update_required) { // Start CUDA graph capture
         CUDA_CHECK(cudaStreamBeginCapture(cuda_ctx->stream(), cudaStreamCaptureModeGlobal));
     }

 #else
-    bool useCudaGraph = false;
-    bool cudaGraphUpdateRequired = false;
+    bool use_cuda_graph = false;
+    bool cuda_graph_update_required = false;
 #endif

     // Only perform the graph execution if CUDA graphs are not enabled, or we are capturing the graph.
     // With use of CUDA graphs, the execution will be performed by the graph launch.
-    if(!useCudaGraph || cudaGraphUpdateRequired) {
+    if(!use_cuda_graph || cuda_graph_update_required) {
     //temporarily avoid indenting here to make code review easier
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];
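Worth spelling out, since it is the key to the whole scheme: for GGML_OP_CPY nodes the code stores the address of the source tensor's data pointer (a char **), not its current value, so that when the copy destination moves on the next token the fresh pointer can be read back and patched into the instantiated graph. A tiny illustration of that double indirection follows, using a hypothetical fake_tensor stand-in rather than the real ggml_tensor; it is not code from the patch.

#include <cstdio>

struct fake_tensor { void * data; };   // illustrative stand-in for ggml_tensor

int main() {
    fake_tensor t { (void *) 0x1000 };

    // Saved once while walking the ggml graph: the address of the pointer.
    char ** arg = (char **) &t.data;

    // On a later token the tensor's data pointer changes...
    t.data = (void *) 0x2000;

    // ...but dereferencing the saved address still yields the up-to-date value,
    // which is what gets written into cudaKernelNodeParams::kernelParams[1].
    printf("current copy destination: %p\n", (void *) *arg);
    return 0;
}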
@@ -2524,67 +2520,74 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
     }

 #ifdef USE_CUDA_GRAPH
-    if(useCudaGraph && (cudaGraphUpdateRequired)) { // End CUDA graph capture
-        CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cudaGraph.graph));
+    if(use_cuda_graph && (cuda_graph_update_required)) { // End CUDA graph capture
+        CUDA_CHECK(cudaStreamEndCapture(cuda_ctx->stream(), &cuda_graph.graph));
     }
-    if(useCudaGraph){
+    if(use_cuda_graph){

-        if(cudaGraph.instance == nullptr) { // Create executable graph from captured graph.
-            CUDA_CHECK(cudaGraphInstantiate(&cudaGraph.instance, cudaGraph.graph, NULL, NULL, 0));
+        if(cuda_graph.instance == nullptr) { // Create executable graph from captured graph.
+            CUDA_CHECK(cudaGraphInstantiate(&cuda_graph.instance, cuda_graph.graph, NULL, NULL, 0));
         }


         // Perform update to graph (if required for this token), and change copy parameter (required for every token)

-        if(cudaGraphUpdateRequired) {
+        if(cuda_graph_update_required) {
            // Extract nodes from graph
-            if(cudaGraph.numNodes == 0) {
-                CUDA_CHECK(cudaGraphGetNodes(cudaGraph.graph, nullptr, &cudaGraph.numNodes));
+            if(cuda_graph.num_nodes == 0) {
+                // First call with null argument gets number of nodes in graph
+                CUDA_CHECK(cudaGraphGetNodes(cuda_graph.graph, nullptr, &cuda_graph.num_nodes));
             }
-            CUDA_CHECK(cudaGraphGetNodes(cudaGraph.graph, cudaGraph.nodes, &cudaGraph.numNodes));
+            // Subsequent call with non-null argument gets nodes
+            CUDA_CHECK(cudaGraphGetNodes(cuda_graph.graph, cuda_graph.nodes, &cuda_graph.num_nodes));

            // Loop over nodes, and extract kernel parameters for each node
-            for(size_t i=0; i<cudaGraph.numNodes; i++) {
-                cudaGraphNodeType nodeType;
-                CUDA_CHECK(cudaGraphNodeGetType(cudaGraph.nodes[i], &nodeType));
-                if (nodeType == cudaGraphNodeTypeKernel) {
-                    auto statRT = cudaGraphKernelNodeGetParams(cudaGraph.nodes[i], &cudaGraph.params[i]); // Get params using runtime
-                    if(statRT == cudaErrorInvalidDeviceFunction) {
+            for(size_t i=0; i<cuda_graph.num_nodes; i++) {
+                cudaGraphNodeType node_type;
+                CUDA_CHECK(cudaGraphNodeGetType(cuda_graph.nodes[i], &node_type));
+                if (node_type == cudaGraphNodeTypeKernel) {
+                    auto stat = cudaGraphKernelNodeGetParams(cuda_graph.nodes[i], &cuda_graph.params[i]); // Get params using runtime
+                    if(stat == cudaErrorInvalidDeviceFunction) {
                         // Fails due to incorrect handling by CUDA runtime of CUDA BLAS node.
                         // We don't need to update blas nodes, so clear error and move on.
                         cudaGetLastError();
                     }
+                    else {
+                        GGML_ASSERT(stat == cudaSuccess);
+                    }
                 }
             }
         }

-            // Update copy kernel param (required every token)
-            if(!cudaGraphUpdateRequired) { // on update steps, the live parameters will already be captured
+            // One of the arguments to the copy kernel is updated for each token, hence we need to
+            // replace that argument with the updated value in the CUDA graph
+            if(!cuda_graph_update_required) { // on update steps, the live parameters will already be captured
                 int k=0;
-                for(size_t i=0; i<cudaGraph.numNodes; i++) {
-                    if(cudaGraph.params[i].func == ggmlCudaCpyFn) {
-                        char** updatedKernelArgPointer = updatedKernelArg[k++];
-                        cudaGraph.params[i].kernelParams[1] = updatedKernelArgPointer;
-                        CUDA_CHECK(cudaGraphKernelNodeSetParams(cudaGraph.nodes[i], &cudaGraph.params[i]));
+                for(size_t i=0; i<cuda_graph.num_nodes; i++) {
+                    if(cuda_graph.params[i].func == ggml_cuda_cpy_fn_ptr) {
+                        char ** updated_kernel_arg_ptr = updated_kernel_arg[k++];
+                        cuda_graph.params[i].kernelParams[1] = updated_kernel_arg_ptr;
+                        CUDA_CHECK(cudaGraphKernelNodeSetParams(cuda_graph.nodes[i], &cuda_graph.params[i]));
                     }
                 }
             }

         // Update graph executable
-        cudaGraphExecUpdateResultInfo resultInfo;
-        auto stat = cudaGraphExecUpdate(cudaGraph.instance, cudaGraph.graph, &resultInfo);
-        if(stat == cudaErrorGraphExecUpdateFailure)
-        {
+        cudaGraphExecUpdateResultInfo result_info;
+        auto stat = cudaGraphExecUpdate(cuda_graph.instance, cuda_graph.graph, &result_info);
+        if(stat == cudaErrorGraphExecUpdateFailure) {
             // The pre-existing graph exec cannot be updated due to violated constraints
-            // so instead clar error and re-instantiate
+            // so instead clear error and re-instantiate
             cudaGetLastError();
-            CUDA_CHECK(cudaGraphInstantiate(&cudaGraph.instance, cudaGraph.graph, NULL, NULL, 0));
+            CUDA_CHECK(cudaGraphInstantiate(&cuda_graph.instance, cuda_graph.graph, NULL, NULL, 0));
+        }
+        else {
+            GGML_ASSERT(stat == cudaSuccess);
         }

         // Launch graph
-        CUDA_CHECK(cudaGraphLaunch(cudaGraph.instance, cuda_ctx->stream()));
+        CUDA_CHECK(cudaGraphLaunch(cuda_graph.instance, cuda_ctx->stream()));
     }
-    cudaGraph.count++;
+    cuda_graph.count++;
 #endif
     return GGML_STATUS_SUCCESS;
 }
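To summarise the update path in this last hunk as one routine: enumerate the captured graph's nodes with the two-call cudaGraphGetNodes pattern, patch the kernel argument of nodes whose function pointer matches the copy kernel, then try a cheap cudaGraphExecUpdate and fall back to re-instantiation on failure. The sketch below is a simplified, standalone rendering of that logic, not the patch itself; cpy_fn_ptr and new_arg stand in for ggml_cuda_cpy_fn_ptr and the saved tensor-data addresses, and the cudaGraphExecDestroy call is added here for the standalone case.

#include <cuda_runtime.h>
#include <vector>

// `instance` was created from an earlier capture; `graph` is a freshly re-captured graph.
static void update_graph_exec(cudaGraphExec_t & instance, cudaGraph_t graph,
                              void * cpy_fn_ptr, char ** new_arg) {
    // Two-call pattern: first query only the node count, then fetch the nodes.
    size_t num_nodes = 0;
    cudaGraphGetNodes(graph, nullptr, &num_nodes);
    std::vector<cudaGraphNode_t> nodes(num_nodes);
    cudaGraphGetNodes(graph, nodes.data(), &num_nodes);

    for (size_t i = 0; i < num_nodes; ++i) {
        cudaGraphNodeType node_type;
        cudaGraphNodeGetType(nodes[i], &node_type);
        if (node_type != cudaGraphNodeTypeKernel) {
            continue;
        }
        cudaKernelNodeParams params;
        if (cudaGraphKernelNodeGetParams(nodes[i], &params) != cudaSuccess) {
            cudaGetLastError();   // e.g. BLAS nodes: clear the error and skip
            continue;
        }
        if (params.func == cpy_fn_ptr) {
            params.kernelParams[1] = new_arg;              // point at the fresh copy destination
            cudaGraphKernelNodeSetParams(nodes[i], &params);
        }
    }

    // Try the cheap in-place update first; re-instantiate if constraints were violated.
    cudaGraphExecUpdateResultInfo result_info;
    if (cudaGraphExecUpdate(instance, graph, &result_info) == cudaErrorGraphExecUpdateFailure) {
        cudaGetLastError();                                // clear the sticky error
        cudaGraphExecDestroy(instance);                    // not in the patch; avoids leaking the old exec here
        cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
    }
}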