// Note: porting this file to C++ is a work in progress

#ifdef _WIN32
#    define WIN32_LEAN_AND_MEAN
#    ifndef NOMINMAX
#        define NOMINMAX
#    endif
#    include <windows.h>
#endif

#include "ggml-backend-impl.h"
#include "ggml-alloc.h"
#include "ggml-impl.h"

#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>

#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

// backend buffer type

const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_name(buft);
}

ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    return buft->iface.alloc_buffer(buft, size);
}

size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    return buft->iface.get_alignment(buft);
}

size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
    // get_max_size is optional, defaults to SIZE_MAX
    if (buft->iface.get_max_size) {
        return buft->iface.get_max_size(buft);
    }

    return SIZE_MAX;
}

size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor) {
    // get_alloc_size is optional, defaults to ggml_nbytes
    if (buft->iface.get_alloc_size) {
        size_t size = buft->iface.get_alloc_size(buft, tensor);
        assert(size >= ggml_nbytes(tensor));
        return size;
    }

    return ggml_nbytes(tensor);
}

bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
    if (buft->iface.is_host) {
        return buft->iface.is_host(buft);
    }

    return false;
}

ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
    return buft->device;
}
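
// Usage sketch (illustrative only, not called from this file): a buffer type describes
// how a backend allocates memory. A typical allocation path, assuming `buft` was obtained
// elsewhere (e.g. from ggml_backend_dev_buffer_type()):
//
//     size_t align = ggml_backend_buft_get_alignment(buft);
//     ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16*1024*1024);
//     if (buf != NULL) {
//         // ... place tensors in the buffer, respecting `align` ...
//         ggml_backend_buffer_free(buf);
//     }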

// backend buffer

ggml_backend_buffer_t ggml_backend_buffer_init(
        ggml_backend_buffer_type_t buft,
        struct ggml_backend_buffer_i iface,
        void * context,
        size_t size) {
    ggml_backend_buffer_t buffer = new ggml_backend_buffer {
        /* .interface = */ iface,
        /* .buft      = */ buft,
        /* .context   = */ context,
        /* .size      = */ size,
        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };

    return buffer;
}

const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name(buffer);
}

void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
    if (buffer == NULL) {
        return;
    }

    if (buffer->iface.free_buffer != NULL) {
        buffer->iface.free_buffer(buffer);
    }

    delete buffer;
}

size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
    return buffer->size;
}

void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
    void * base = buffer->iface.get_base(buffer);

    GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");

    return base;
}

void ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    // init_tensor is optional
    if (buffer->iface.init_tensor) {
        buffer->iface.init_tensor(buffer, tensor);
    }
}

size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
}

void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    buffer->iface.clear(buffer, value);
}

bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    buffer->usage = usage;

    // FIXME: add a generic callback to the buffer interface
    if (ggml_backend_buffer_is_multi_buffer(buffer)) {
        ggml_backend_multi_buffer_set_usage(buffer, usage);
    }
}

enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) {
    return buffer->usage;
}

ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
    return buffer->buft;
}

void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
    if (buffer->iface.reset) {
        buffer->iface.reset(buffer);
    }
}

bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
    ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
    if (dst_buf->iface.cpy_tensor) {
        return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
    }
    return false;
}
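
// Illustrative sketch (not part of the library logic): querying a buffer after allocation.
// Assumes `buf` is a valid ggml_backend_buffer_t obtained from one of the alloc functions above:
//
//     size_t size = ggml_backend_buffer_get_size(buf);
//     if (ggml_backend_buffer_is_host(buf)) {
//         void * base = ggml_backend_buffer_get_base(buf);   // directly addressable memory
//         memset(base, 0, size);
//     } else {
//         ggml_backend_buffer_clear(buf, 0);                  // let the backend clear it
//     }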

// backend

ggml_guid_t ggml_backend_guid(ggml_backend_t backend) {
    if (backend == NULL) {
        return NULL;
    }

    return backend->guid;
}

const char * ggml_backend_name(ggml_backend_t backend) {
    if (backend == NULL) {
        return "NULL";
    }

    return backend->iface.get_name(backend);
}

void ggml_backend_free(ggml_backend_t backend) {
    if (backend == NULL) {
        return;
    }

    backend->iface.free(backend);
}

ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
    return backend->iface.get_default_buffer_type(backend);
}

ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
    return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
}

size_t ggml_backend_get_alignment(ggml_backend_t backend) {
    return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
}

size_t ggml_backend_get_max_size(ggml_backend_t backend) {
    return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
}

void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (backend->iface.set_tensor_async == NULL) {
        ggml_backend_tensor_set(tensor, data, offset, size);
    } else {
        backend->iface.set_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (backend->iface.get_tensor_async == NULL) {
        ggml_backend_tensor_get(tensor, data, offset, size);
    } else {
        backend->iface.get_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (!size) {
        return;
    }

    buf->iface.set_tensor(buf, tensor, data, offset, size);
}

void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (!size) {
        return;
    }

    buf->iface.get_tensor(buf, tensor, data, offset, size);
}
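
// Illustrative sketch (assumes `t` is an F32 tensor already allocated in a backend buffer):
// ggml_backend_tensor_set/get copy between host memory and wherever the tensor actually lives,
// so the same code works for CPU and GPU buffers:
//
//     std::vector<float> host(ggml_nelements(t));
//     ggml_backend_tensor_set(t, host.data(), 0, ggml_nbytes(t));   // upload
//     ggml_backend_tensor_get(t, host.data(), 0, ggml_nbytes(t));   // download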

GGML_API void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (!size) {
        return;
    }

    GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not supported by backend buffer");

    buf->iface.memset_tensor(buf, tensor, value, offset, size);
}

void ggml_backend_synchronize(ggml_backend_t backend) {
    if (backend->iface.synchronize == NULL) {
        return;
    }

    backend->iface.synchronize(backend);
}

ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    GGML_ASSERT(backend->iface.graph_plan_create != NULL);

    return backend->iface.graph_plan_create(backend, cgraph);
}

void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend->iface.graph_plan_free != NULL);

    backend->iface.graph_plan_free(backend, plan);
}

enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend->iface.graph_plan_compute != NULL);

    return backend->iface.graph_plan_compute(backend, plan);
}

enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph);
    ggml_backend_synchronize(backend);
    return err;
}

enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    return backend->iface.graph_compute(backend, cgraph);
}
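
// Illustrative sketch (assumes `graph` was built and its tensors allocated for this backend):
// the synchronous and asynchronous compute paths differ only in who calls ggml_backend_synchronize():
//
//     ggml_backend_graph_compute(backend, graph);          // blocks until done
//
//     ggml_backend_graph_compute_async(backend, graph);    // returns immediately
//     // ... overlap host-side work here ...
//     ggml_backend_synchronize(backend);                   // wait for completion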

bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    // helper to ease transition to device interface
    if (backend->device) {
        return ggml_backend_dev_supports_op(backend->device, op);
    }

    return backend->iface.supports_op(backend, op);
}

bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    // helper to ease transition to device interface
    if (backend->device) {
        return ggml_backend_dev_supports_buft(backend->device, buft);
    }

    return backend->iface.supports_buft(backend, buft);
}

bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    // helper to ease transition to device interface
    if (backend->device) {
        return ggml_backend_dev_offload_op(backend->device, op);
    }

    if (backend->iface.offload_op != NULL) {
        return backend->iface.offload_op(backend, op);
    }

    return false;
}

ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) {
    return backend->device;
}

// backend copy

static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (ggml_backend_buffer_is_host(src->buffer)) {
        ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
    } else if (ggml_backend_buffer_is_host(dst->buffer)) {
        ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
    } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
#endif
        size_t nbytes = ggml_nbytes(src);
        void * data = malloc(nbytes);
        ggml_backend_tensor_get(src, data, 0, nbytes);
        ggml_backend_tensor_set(dst, data, 0, nbytes);
        free(data);
    }
}

void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (backend_dst->iface.cpy_tensor_async != NULL) {
        if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
            return;
        }
    }

    // an async copy would normally happen after all the queued operations on both backends are completed
    // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
    ggml_backend_synchronize(backend_src);
    ggml_backend_synchronize(backend_dst);
    ggml_backend_tensor_copy(src, dst);
}

// events

ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) {
    // null device is allowed for the transition period to the device interface
    if (device == NULL || device->iface.event_new == NULL) {
        return NULL;
    }
    return device->iface.event_new(device);
}

void ggml_backend_event_free(ggml_backend_event_t event) {
    if (event == NULL) {
        return;
    }
    event->device->iface.event_free(event->device, event);
}

void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) {
    GGML_ASSERT(backend->iface.event_record != NULL);

    backend->iface.event_record(backend, event);
}

void ggml_backend_event_synchronize(ggml_backend_event_t event) {
    GGML_ASSERT(event->device->iface.event_synchronize);

    event->device->iface.event_synchronize(event->device, event);
}

void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
    GGML_ASSERT(backend->iface.event_wait != NULL);

    backend->iface.event_wait(backend, event);
}
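
// Illustrative sketch (assumes two backends sharing work and that `backend_src`'s device
// supports events): an event lets one backend wait for another without a full host-side
// synchronization:
//
//     ggml_backend_event_t ev = ggml_backend_event_new(ggml_backend_get_device(backend_src));
//     if (ev != NULL) {
//         ggml_backend_graph_compute_async(backend_src, graph_src);
//         ggml_backend_event_record(ev, backend_src);
//         ggml_backend_event_wait(backend_dst, ev);   // queued wait on backend_dst
//         ggml_backend_event_free(ev);
//     }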

// Backend device

const char * ggml_backend_dev_name(ggml_backend_dev_t device) {
    return device->iface.get_name(device);
}

const char * ggml_backend_dev_description(ggml_backend_dev_t device) {
    return device->iface.get_description(device);
}

void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    device->iface.get_memory(device, free, total);
}

enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
    return device->iface.get_type(device);
}

void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
    memset(props, 0, sizeof(*props));
    device->iface.get_props(device, props);
}

ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) {
    return device->reg;
}

ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) {
    return device->iface.init_backend(device, params);
}

ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) {
    return device->iface.get_buffer_type(device);
}

ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
    if (device->iface.get_host_buffer_type == NULL) {
        return NULL;
    }

    return device->iface.get_host_buffer_type(device);
}

ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
    return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
}
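
// Illustrative sketch (assumes `dev` is a valid ggml_backend_dev_t obtained from the registry):
// a device can report its memory and create a backend instance plus a matching buffer type:
//
//     size_t free_mem, total_mem;
//     ggml_backend_dev_memory(dev, &free_mem, &total_mem);
//     ggml_backend_t backend = ggml_backend_dev_init(dev, /* params = */ NULL);
//     if (backend != NULL) {
//         ggml_backend_buffer_type_t buft = ggml_backend_dev_buffer_type(dev);
//         // ... allocate buffers from buft, run graphs on backend ...
//         ggml_backend_free(backend);
//     }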

bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    return device->iface.supports_op(device, op);
}
bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) {
    return device->iface.supports_buft(device, buft);
}

bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    if (device->iface.offload_op != NULL) {
        return device->iface.offload_op(device, op);
    }
    return false;
}
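
// Usage sketch (illustrative only, kept under #if 0 so it is not compiled):
// how a caller might query a device with the two functions above. The helper
// name and its parameters are hypothetical; "buft" and "op" are assumed to be
// provided by the caller.
#if 0
static void device_query_example(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft, const struct ggml_tensor * op) {
    // can the device compute on tensors allocated from this buffer type?
    bool can_use_buft = ggml_backend_dev_supports_buft(dev, buft);
    // does the device ask for this op to be offloaded to it? (false when the
    // device does not implement offload_op)
    bool wants_op = ggml_backend_dev_offload_op(dev, op);
    (void) can_use_buft;
    (void) wants_op;
}
#endif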

// Backend (reg)

const char * ggml_backend_reg_name(ggml_backend_reg_t reg) {
    return reg->iface.get_name(reg);
}

size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) {
    return reg->iface.get_device_count(reg);
}

ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) {
    return reg->iface.get_device(reg, index);
}

void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (!reg->iface.get_proc_address) {
        return NULL;
    }
    return reg->iface.get_proc_address(reg, name);
}

// Backend registry

#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

#ifdef GGML_USE_METAL
#include "ggml-metal.h"
#endif

#ifdef GGML_USE_SYCL
#include "ggml-sycl.h"
#endif

#ifdef GGML_USE_VULKAN
#include "ggml-vulkan.h"
#endif

#ifdef GGML_USE_BLAS
#include "ggml-blas.h"
#endif

#ifdef GGML_USE_RPC
#include "ggml-rpc.h"
#endif

#ifndef __AMX_INT8__
#undef GGML_USE_AMX
#endif

#ifdef GGML_USE_AMX
#include "ggml-amx.h"
#endif

#ifdef GGML_USE_CANN
#include "ggml-cann.h"
#endif

struct ggml_backend_registry {
    std::vector<ggml_backend_reg_t> backends;
    std::vector<ggml_backend_dev_t> devices;

    ggml_backend_registry() {
#ifdef GGML_USE_CUDA
        register_backend(ggml_backend_cuda_reg());
#endif
#ifdef GGML_USE_METAL
        register_backend(ggml_backend_metal_reg());
#endif
#ifdef GGML_USE_SYCL
        register_backend(ggml_backend_sycl_reg());
#endif
#ifdef GGML_USE_VULKAN
        register_backend(ggml_backend_vk_reg());
#endif
#ifdef GGML_USE_BLAS
        register_backend(ggml_backend_blas_reg());
#endif
#ifdef GGML_USE_RPC
        register_backend(ggml_backend_rpc_reg());
#endif
#ifdef GGML_USE_AMX
        register_backend(ggml_backend_amx_reg());
#endif
#ifdef GGML_USE_CANN
        register_backend(ggml_backend_cann_reg());
#endif

        // TODO: kompute

        register_backend(ggml_backend_cpu_reg());
    }

    void register_backend(ggml_backend_reg_t reg) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: registered backend %s (%zu devices)\n",
            __func__, ggml_backend_reg_name(reg), ggml_backend_reg_dev_count(reg));
#endif
        backends.push_back(reg);
        for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
            register_device(ggml_backend_reg_dev_get(reg, i));
        }
    }

    void register_device(ggml_backend_dev_t device) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: registered device %s (%s)\n", __func__, ggml_backend_dev_name(device), ggml_backend_dev_description(device));
#endif
        devices.push_back(device);
    }
};

static ggml_backend_registry & get_reg() {
    static ggml_backend_registry reg;
    return reg;
}

// Internal API

void ggml_backend_register(ggml_backend_reg_t reg) {
    get_reg().register_backend(reg);
}

void ggml_backend_device_register(ggml_backend_dev_t device) {
    get_reg().register_device(device);
}
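
// Illustrative sketch (not compiled): an out-of-tree backend would expose its
// own ggml_backend_reg_t and hand it to the registry through
// ggml_backend_register() above. The reg function name below is a hypothetical
// placeholder, not part of this library.
#if 0
extern "C" ggml_backend_reg_t my_custom_backend_reg(void); // hypothetical

static void register_my_custom_backend(void) {
    // adds the backend and all of its devices to the global registry
    ggml_backend_register(my_custom_backend_reg());
}
#endif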

// Backend (reg) enumeration

size_t ggml_backend_reg_count() {
    return get_reg().backends.size();
}

ggml_backend_reg_t ggml_backend_reg_get(size_t index) {
    GGML_ASSERT(index < ggml_backend_reg_count());
    return get_reg().backends[index];
}

ggml_backend_reg_t ggml_backend_reg_by_name(const char * name) {
    for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
        ggml_backend_reg_t reg = ggml_backend_reg_get(i);
        if (strcmp(ggml_backend_reg_name(reg), name) == 0) {
            return reg;
        }
    }
    return NULL;
}

// Device enumeration

size_t ggml_backend_dev_count() {
    return get_reg().devices.size();
}

ggml_backend_dev_t ggml_backend_dev_get(size_t index) {
    GGML_ASSERT(index < ggml_backend_dev_count());
    return get_reg().devices[index];
}

ggml_backend_dev_t ggml_backend_dev_by_name(const char * name) {
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        if (strcmp(ggml_backend_dev_name(dev), name) == 0) {
            return dev;
        }
    }
    return NULL;
}

ggml_backend_dev_t ggml_backend_dev_by_type(enum ggml_backend_dev_type type) {
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        if (ggml_backend_dev_type(dev) == type) {
            return dev;
        }
    }
    return NULL;
}
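
// Usage sketch (illustrative only, not compiled): walking the registry with
// the enumeration helpers above. ggml_backend_dev_name() and
// ggml_backend_dev_description() are the same accessors used by the registry
// debug logging.
#if 0
static void print_registered_backends_and_devices(void) {
    for (size_t i = 0; i < ggml_backend_reg_count(); i++) {
        ggml_backend_reg_t reg = ggml_backend_reg_get(i);
        printf("backend: %s (%zu devices)\n", ggml_backend_reg_name(reg), ggml_backend_reg_dev_count(reg));
    }
    for (size_t i = 0; i < ggml_backend_dev_count(); i++) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        printf("device: %s (%s)\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
    }
}
#endif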

// Convenience functions

ggml_backend_t ggml_backend_init_by_name(const char * name, const char * params) {
    ggml_backend_dev_t dev = ggml_backend_dev_by_name(name);
    if (!dev) {
        return NULL;
    }
    return ggml_backend_dev_init(dev, params);
}

ggml_backend_t ggml_backend_init_by_type(enum ggml_backend_dev_type type, const char * params) {
    ggml_backend_dev_t dev = ggml_backend_dev_by_type(type);
    if (!dev) {
        return NULL;
    }
    return ggml_backend_dev_init(dev, params);
}

ggml_backend_t ggml_backend_init_best(void) {
    ggml_backend_dev_t dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_GPU_FULL);
    if (!dev) {
        dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU_FULL);
    }
    if (!dev) {
        return NULL;
    }
    return ggml_backend_dev_init(dev, NULL);
}
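
// Usage sketch (illustrative only, not compiled): picking a backend with the
// convenience helpers above. ggml_backend_free() is assumed here to be the
// matching cleanup call from the public backend API.
#if 0
static void init_best_backend_example(void) {
    ggml_backend_t backend = ggml_backend_init_best();
    if (backend == NULL) {
        fprintf(stderr, "no usable backend found\n");
        return;
    }
    // ... build and compute graphs with the backend ...
    ggml_backend_free(backend); // assumed public API
}
#endif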

// backend CPU

static const char * ggml_backend_cpu_buffer_get_name(ggml_backend_buffer_t buffer) {
    return "CPU";

    GGML_UNUSED(buffer);
}

static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
    uintptr_t data = (uintptr_t) buffer->context;

    // align the buffer
    if (data % TENSOR_ALIGNMENT != 0) {
        data = GGML_PAD(data, TENSOR_ALIGNMENT);
    }

    return (void *) data;
}

static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_aligned_free(buffer->context, buffer->size);
}

static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    GGML_UNUSED(buffer);
}

static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    memset(buffer->context, value, buffer->size);
}

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
    /* .get_name      = */ ggml_backend_cpu_buffer_get_name,
    /* .free_buffer   = */ ggml_backend_cpu_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};

static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
    /* .get_name      = */ ggml_backend_cpu_buffer_get_name,
    /* .free_buffer   = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
    /* .get_base      = */ ggml_backend_cpu_buffer_get_base,
    /* .init_tensor   = */ NULL, // no initialization required
    /* .memset_tensor = */ ggml_backend_cpu_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_cpu_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_cpu_buffer_get_tensor,
    /* .cpy_tensor    = */ ggml_backend_cpu_buffer_cpy_tensor,
    /* .clear         = */ ggml_backend_cpu_buffer_clear,
    /* .reset         = */ NULL,
};

static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU";

    GGML_UNUSED(buft);
}

static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    auto alloc_size = size;
    if (alloc_size == 0) {
        alloc_size = 1;
    }

    void * data = ggml_aligned_malloc(alloc_size);

    if (data == NULL) {
        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, alloc_size);
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, alloc_size);
}

static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return TENSOR_ALIGNMENT;

    GGML_UNUSED(buft);
}

static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    GGML_UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type;
}
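
// Usage sketch (illustrative only, not compiled): allocating a host buffer
// through the CPU buffer type. ggml_backend_buft_alloc_buffer() is defined
// earlier in this file; ggml_backend_buffer_free() is assumed to be the
// matching cleanup call from the public buffer API.
#if 0
static void cpu_buffer_alloc_example(void) {
    ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();
    ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, 16*1024*1024); // 16 MiB
    if (buf == NULL) {
        return;
    }
    // ... place tensors in the buffer via ggml-alloc ...
    ggml_backend_buffer_free(buf); // assumed public API
}
#endif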

#ifdef GGML_USE_CPU_HBM

// buffer type HBM

#include <hbwmalloc.h>

static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "CPU_HBM";

    GGML_UNUSED(buft);
}

static const char * ggml_backend_cpu_hbm_buffer_get_name(ggml_backend_buffer_t buf) {
    return "CPU_HBM";

    GGML_UNUSED(buf);
}

static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    hbw_free(buffer->context);
}

static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    //void * ptr = hbw_malloc(size);
    void * ptr;
    int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
    if (result != 0) {
        GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
        return NULL;
    }

    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
    buffer->buft = buft;
    buffer->iface.get_name = ggml_backend_cpu_hbm_buffer_get_name;
    buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;

    return buffer;
}

ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_cpu_hbm_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_cpu_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL, // defaults to SIZE_MAX
            /* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_cpu_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_buffer_type_hbm;
}
#endif

struct ggml_backend_cpu_context {
    int               n_threads;
    ggml_threadpool_t threadpool;

    uint8_t * work_data;
    size_t    work_size;

    ggml_abort_callback abort_callback;
    void *              abort_callback_data;
};

static const char * ggml_backend_cpu_get_name(ggml_backend_t backend) {
    return "CPU";

    GGML_UNUSED(backend);
}

static void ggml_backend_cpu_free(ggml_backend_t backend) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
    delete[] cpu_ctx->work_data;
    delete cpu_ctx;
    delete backend;
}

static ggml_backend_buffer_type_t ggml_backend_cpu_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_cpu_buffer_type();

    GGML_UNUSED(backend);
}

struct ggml_backend_plan_cpu {
    struct ggml_cplan cplan;
    struct ggml_cgraph cgraph;
};

static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(ggml_backend_t backend, const struct ggml_cgraph * cgraph) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;

    struct ggml_backend_plan_cpu * cpu_plan = new ggml_backend_plan_cpu;

    cpu_plan->cplan  = ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool);
    cpu_plan->cgraph = *cgraph; // FIXME: deep copy

    if (cpu_plan->cplan.work_size > 0) {
        cpu_plan->cplan.work_data = new uint8_t[cpu_plan->cplan.work_size];
        if (cpu_plan->cplan.work_data == NULL) {
            delete cpu_plan;
            return NULL;
        }
    }

    cpu_plan->cplan.abort_callback      = cpu_ctx->abort_callback;
    cpu_plan->cplan.abort_callback_data = cpu_ctx->abort_callback_data;

    return cpu_plan;
}

static void ggml_backend_cpu_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;

    delete[] cpu_plan->cplan.work_data;
    delete cpu_plan;

    GGML_UNUSED(backend);
}

static enum ggml_status ggml_backend_cpu_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    struct ggml_backend_plan_cpu * cpu_plan = (struct ggml_backend_plan_cpu *)plan;

    return ggml_graph_compute(&cpu_plan->cgraph, &cpu_plan->cplan);

    GGML_UNUSED(backend);
}
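
// Usage sketch (illustrative only, not compiled): the create/compute/free plan
// lifecycle as implemented by the CPU backend functions above. "backend" and
// "graph" are assumed to be supplied by the caller (e.g. a CPU backend and a
// built ggml_cgraph).
#if 0
static enum ggml_status run_cpu_plan_example(ggml_backend_t backend, struct ggml_cgraph * graph) {
    ggml_backend_graph_plan_t plan = ggml_backend_cpu_graph_plan_create(backend, graph);
    if (plan == NULL) {
        return GGML_STATUS_ALLOC_FAILED;
    }
    enum ggml_status status = ggml_backend_cpu_graph_plan_compute(backend, plan);
    ggml_backend_cpu_graph_plan_free(backend, plan);
    return status;
}
#endif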

static enum ggml_status ggml_backend_cpu_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    struct ggml_backend_cpu_context * cpu_ctx = (struct ggml_backend_cpu_context *)backend->context;
Threadpool: take 2 (#8672)
* Introduce ggml_compute_threadpool
- OpenMP functional: check
- Vanilla ggml functional: Check
- ggml w/threadpool functional: Check
- OpenMP no regression: No glaring problems
- Vanilla ggml no regression: No glaring problems
- ggml w/threadpool no regression: No glaring problems
* Minor fixes
* fixed use after release bug
* fixed a harmless race condition
* Fix Android bulid issue
* fix more race conditions
* fix deadlock for cases where cgraph.n_nodes == 1
and fix --poll case
* threadpool: use cpu_get_num_math to set the default number of threadpool threads
This way we avoid using E-Cores and Hyperthreaded siblings.
* bench: create fresh threadpool for each test
For benchmarking it's better to start a fresh pool for each test with the exact number of threads
needed for that test. Having larger pools is suboptimal (causes more load, etc).
* atomics: always use stdatomics with clang and use relaxed memory order when polling in ggml_barrier
This also removes sched_yield() calls from ggml_barrier() to match OpenMP behavior.
* threadpool: make polling the default to match openmp behavior
All command line args now allow for setting poll to 0 (false).
* threadpool: do not wakeup threads in already paused threadpool
* fix potential race condition in check_for_work
* threadpool: do not create two threadpools if their params are identical
* threadpool: reduce pause/resume/wakeup overhead in common cases
We now start threadpool in paused state only if we have two.
The resume is now implicit (ie new work) which allows for reduced locking and context-switch overhead.
* threadpool: add support for hybrid polling
poll params (--poll, ...) now specify "polling level", i.e. how aggresively we poll before waiting on cond.var.
poll=0 means no polling, 1 means poll for 128K rounds then wait, 2 for 256K rounds, ...
The default value of 50 (ie 50x128K rounds) seems like a decent default across modern platforms.
We can tune this further as things evolve.
* threadpool: reduce the number of barrier required
New work is now indicated with an atomic counter that is incremented for
each new graph that needs to be computed.
This removes the need for extra barrier for clearing the "new_work" and
removes the special case for trivial graphs.
* threadpool: remove special-casing for disposable threadpools
With the efficient hybrid polling there is no need to make disposable pools any different.
This simplifies the overall logic and reduces branching.
Include n_threads in debug print for disposable threadpool.
Declare pause and stop flags as atomic_bool
This doesn't actually generate any memory barriers and simply informs
the thread sanitizer that these flags can be written & read by different
threads without locking.
* threadpool: do not clear barrier counters between graphs computes (fixes race with small graphs)
This fixes the race condition with very small graphs where the main thread happens to
start a new graph while the workers are just about to exit from barriers.
* threadpool: use relaxed order for chunk sync
Full memory barrier is an overkill for this since each thread works on different chunk
* threadpool: remove abort_callback from threadpool state
* threadpool: better naming for thread/cpumask related functions
* threadpool: consistent use of int type for n_threads params
* threadpool: add support for ggml_threadpool_params_default/init
Also removes the need for explicit mask_specified param.
all-zero cpumask means use default (usually inherited) cpu affinity mask.
* threadpool: move typedef into ggml.h
* threadpool: fix apply_priority() function name
* threadpool: fix swift wrapper errors due to n_threads int type cleanup
* threadpool: enable --cpu-mask and other threadpool related options only if threadpool is enabled
* threadpool: replace checks for compute_thread ret code with proper status check
* threadpool: simplify threadpool init logic and fix main thread affinity application
Most of the init code is now exactly the same between threadpool and openmp.
* threadpool: update threadpool resume/pause function names
* threadpool: enable openmp by default for now
* threadpool: don't forget to free workers state when omp is enabled
* threadpool: avoid updating process priority on the platforms that do not require it
On Windows we need to change overall process priority class in order to set thread priorities,
but on Linux, Mac, etc we do not need to touch the overall process settings.
* threadpool: update calling thread prio and affinity only at start/resume
This avoids extra syscalls for each graph_compute()
* llama-bench: turn threadpool params into vectors, add output headers, etc
* llama-bench: add support for cool off between tests --delay
This helps for long running tests on platforms that are thermally limited (phones, laptops, etc).
--delay (disabled by default) introduces the sleep for N seconds before starting each test.
* threadpool: move process priority setting into the apps (bench and cli)
This avoids changing the overall process priority on Windows for the apps
that use ggml/llama.cpp directly.
* threadpool: move all pause/resume logic into ggml
* threadpool: further API cleanup and prep for future refactoring
All threadpool related functions and structs use ggml_threadpool prefix.
* threadpool: minor indent fixes
* threadpool: improve setpriority error message
* Update examples/llama-bench/llama-bench.cpp
Co-authored-by: slaren <slarengh@gmail.com>
* threadpool: fix indent in set_threadpool call
* use int32_t for n_thread type in public llama.cpp API
* threadpool: use _new and _free instead of _create and _release
* fix two more public APIs to use int32_t for n_threads
* build: set _GNU_SOURCE for Android
---------
Co-authored-by: Max Krasnyansky <quic_maxk@quicinc.com>
Co-authored-by: fmz <quic_fzaghlou@quic.com>
Co-authored-by: Max Krasnyansky <max.krasnyansky@gmail.com>
Co-authored-by: slaren <slarengh@gmail.com>
2024-08-29 23:20:53 +00:00
    struct ggml_cplan cplan = ggml_graph_plan(cgraph, cpu_ctx->n_threads, cpu_ctx->threadpool);
2023-10-08 17:19:14 +00:00
    if (cpu_ctx->work_size < cplan.work_size) {
2024-10-02 23:49:47 +00:00
        delete[] cpu_ctx->work_data;
        cpu_ctx->work_data = new uint8_t[cplan.work_size];
2024-03-13 17:54:21 +00:00
        if (cpu_ctx->work_data == NULL) {
            cpu_ctx->work_size = 0;
            return GGML_STATUS_ALLOC_FAILED;
        }
2023-10-08 17:19:14 +00:00
        cpu_ctx->work_size = cplan.work_size;
    }
2024-10-02 23:49:47 +00:00
    cplan.work_data = (uint8_t *) cpu_ctx->work_data;
2023-10-08 17:19:14 +00:00
2024-02-09 09:42:27 +00:00
    cplan.abort_callback      = cpu_ctx->abort_callback;
    cplan.abort_callback_data = cpu_ctx->abort_callback_data;
2024-03-04 09:05:42 +00:00
    return ggml_graph_compute(cgraph, &cplan);
2023-10-08 17:19:14 +00:00
}
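// Usage sketch (illustrative only): the more common one-shot path, where the work buffer
// above is sized lazily and reused across calls. `graph` is assumed to be a ggml_cgraph
// built by the caller.
//
//     ggml_backend_t backend = ggml_backend_cpu_init();
//     if (ggml_backend_graph_compute(backend, graph) != GGML_STATUS_SUCCESS) {
//         // handle allocation failure or a user-requested abort
//     }
//     ggml_backend_free(backend);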
2024-10-02 23:49:47 +00:00
static const struct ggml_backend_i ggml_backend_cpu_i = {
    /* .get_name                = */ ggml_backend_cpu_get_name,
2023-12-07 20:26:54 +00:00
    /* .free                    = */ ggml_backend_cpu_free,
    /* .get_default_buffer_type = */ ggml_backend_cpu_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
2024-01-12 19:07:38 +00:00
    /* .cpy_tensor_async        = */ NULL,
2023-12-07 20:26:54 +00:00
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ ggml_backend_cpu_graph_plan_create,
    /* .graph_plan_free         = */ ggml_backend_cpu_graph_plan_free,
2024-06-13 01:11:35 +00:00
    /* .graph_plan_update       = */ NULL,
2023-12-07 20:26:54 +00:00
    /* .graph_plan_compute      = */ ggml_backend_cpu_graph_plan_compute,
    /* .graph_compute           = */ ggml_backend_cpu_graph_compute,
2024-10-02 23:49:47 +00:00
    /* .supports_op             = */ NULL,
    /* .supports_buft           = */ NULL,
2024-03-18 10:03:04 +00:00
    /* .offload_op              = */ NULL,
2024-03-13 17:54:21 +00:00
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
2023-10-08 17:19:14 +00:00
};
2024-02-24 16:27:36 +00:00
static ggml_guid_t ggml_backend_cpu_guid(void) {
    static ggml_guid guid = { 0xaa, 0x67, 0xc7, 0x43, 0x96, 0xe6, 0xa3, 0x8a, 0xe3, 0xaf, 0xea, 0x92, 0x36, 0xbc, 0xfc, 0x89 };
    return &guid;
}
2023-10-08 17:19:14 +00:00
ggml_backend_t ggml_backend_cpu_init(void) {
2024-10-02 23:49:47 +00:00
    struct ggml_backend_cpu_context * ctx = new ggml_backend_cpu_context;
2024-02-12 07:16:06 +00:00
    if (ctx == NULL) {
        return NULL;
    }
2023-10-08 17:19:14 +00:00
2024-02-09 09:42:27 +00:00
    ctx->n_threads           = GGML_DEFAULT_N_THREADS;
2024-08-29 23:20:53 +00:00
    ctx->threadpool          = NULL;
2024-02-09 09:42:27 +00:00
    ctx->work_data           = NULL;
    ctx->work_size           = 0;
    ctx->abort_callback      = NULL;
    ctx->abort_callback_data = NULL;
2023-10-08 17:19:14 +00:00
2024-10-02 23:49:47 +00:00
    ggml_backend_t cpu_backend = new ggml_backend {
        /* .guid      = */ ggml_backend_cpu_guid(),
        /* .interface = */ ggml_backend_cpu_i,
        /* .device    = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
        /* .context   = */ ctx,
    };
2024-02-12 07:16:06 +00:00
    if (cpu_backend == NULL) {
2024-10-02 23:49:47 +00:00
        delete ctx;
2024-02-12 07:16:06 +00:00
        return NULL;
    }
2023-10-08 17:19:14 +00:00
    return cpu_backend;
}
2024-10-02 23:49:47 +00:00
bool ggml_backend_is_cpu(ggml_backend_t backend) {
2024-02-24 16:27:36 +00:00
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_cpu_guid());
2023-10-08 17:19:14 +00:00
}
void ggml_backend_cpu_set_n_threads(ggml_backend_t backend_cpu, int n_threads) {
    GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));

    struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *) backend_cpu->context;
    ctx->n_threads = n_threads;
}
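// Usage sketch (illustrative only): overriding the default thread count right after
// initialization; 4 is an arbitrary example value.
//
//     ggml_backend_t backend = ggml_backend_cpu_init();
//     ggml_backend_cpu_set_n_threads(backend, 4);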
2024-08-29 23:20:53 +00:00
void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool_t threadpool) {
    GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));

    struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *) backend_cpu->context;

    if (ctx->threadpool && ctx->threadpool != threadpool) {
        // already had a different threadpool, pause/suspend it before switching
        ggml_threadpool_pause(ctx->threadpool);
    }
    ctx->threadpool = threadpool;
}
2024-02-09 09:42:27 +00:00
void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data) {
    GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));

    struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *) backend_cpu->context;
    ctx->abort_callback      = abort_callback;
    ctx->abort_callback_data = abort_callback_data;
}
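// Usage sketch (illustrative only): a deadline-based abort callback. The callback is
// polled during graph computation and returning true aborts the current compute. The
// deadline variable and helper function below are assumptions of this example, not part
// of ggml.
//
//     static bool abort_after_deadline(void * data) {
//         const int64_t * t_deadline_us = (const int64_t *) data;
//         return ggml_time_us() > *t_deadline_us;
//     }
//
//     int64_t t_deadline_us = ggml_time_us() + 5 * 1000 * 1000; // ~5 seconds
//     ggml_backend_cpu_set_abort_callback(backend, abort_after_deadline, &t_deadline_us);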
2024-10-02 23:49:47 +00:00
ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
2024-02-12 07:16:06 +00:00
    GGML_ASSERT((uintptr_t) ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
2024-10-02 23:49:47 +00:00
    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
2023-12-07 20:26:54 +00:00
}
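// Usage sketch (illustrative only): wrapping caller-owned, suitably aligned memory so
// tensors can live in it without an extra copy. The arena size and 64-byte alignment are
// example assumptions; the assert above enforces TENSOR_ALIGNMENT.
//
//     alignas(64) static uint8_t arena[16 * 1024 * 1024];
//     ggml_backend_buffer_t buf = ggml_backend_cpu_buffer_from_ptr(arena, sizeof(arena));
//     // ... place tensors in `buf` (e.g. via ggml-alloc), then:
//     ggml_backend_buffer_free(buf);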
2024-10-02 23:49:47 +00:00
////////////////////////
2024-10-03 15:39:18 +00:00
struct ggml_backend_cpu_device_context {
    std::string description = "CPU";

    ggml_backend_cpu_device_context() {
#ifdef __APPLE__
        size_t len = 0;
        if (!sysctlbyname("machdep.cpu.brand_string", NULL, &len, NULL, 0)) {
            description.resize(len);
            sysctlbyname("machdep.cpu.brand_string", &description[0], &len, NULL, 0); // NOLINT
        }
#elif defined(__linux__)
        FILE * f = fopen("/proc/cpuinfo", "r");
        if (f) {
            char buf[1024];
            while (fgets(buf, sizeof(buf), f)) {
                if (strncmp(buf, "model name", 10) == 0) {
                    char * p = strchr(buf, ':');
                    if (p) {
                        p++;
                        while (std::isspace(*p)) {
                            p++;
                        }
                        while (std::isspace(p[strlen(p) - 1])) {
                            p[strlen(p) - 1] = '\0';
                        }
                        description = p;
                        break;
                    }
                }
            }
            fclose(f);
        }
#elif defined(_WIN32)
        HKEY hKey;
        if (RegOpenKeyEx(HKEY_LOCAL_MACHINE,
                         TEXT("HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0"),
                         0,
                         KEY_READ,
                         &hKey) == ERROR_SUCCESS) {
            DWORD cpu_brand_size = 0;
            if (RegQueryValueExA(hKey,
                                 TEXT("ProcessorNameString"),
                                 NULL,
                                 NULL,
                                 NULL,
                                 &cpu_brand_size) == ERROR_SUCCESS) {
                description.resize(cpu_brand_size);
                if (RegQueryValueExA(hKey,
                                     TEXT("ProcessorNameString"),
                                     NULL,
                                     NULL,
                                     (LPBYTE) &description[0], // NOLINT
                                     &cpu_brand_size) == ERROR_SUCCESS) {
                    if (description.find('\0') != std::string::npos) {
                        description.resize(description.find('\0'));
                    }
                }
            }
            RegCloseKey(hKey);
        }
#endif
    }
};
2024-10-02 23:49:47 +00:00
static const char * ggml_backend_cpu_device_get_name(ggml_backend_dev_t dev) {
    return "CPU";

    GGML_UNUSED(dev);
}

static const char * ggml_backend_cpu_device_get_description(ggml_backend_dev_t dev) {
2024-10-03 15:39:18 +00:00
    struct ggml_backend_cpu_device_context * ctx = (struct ggml_backend_cpu_device_context *) dev->context;
2024-10-02 23:49:47 +00:00
2024-10-03 15:39:18 +00:00
    return ctx->description.c_str();
2024-10-02 23:49:47 +00:00
}

static void ggml_backend_cpu_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    // TODO
    *free  = 0;
    *total = 0;

    GGML_UNUSED(dev);
}

static enum ggml_backend_dev_type ggml_backend_cpu_device_get_type(ggml_backend_dev_t dev) {
    return GGML_BACKEND_DEVICE_TYPE_CPU_FULL;

    GGML_UNUSED(dev);
}

static void ggml_backend_cpu_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
    props->name        = ggml_backend_cpu_device_get_name(dev);
    props->description = ggml_backend_cpu_device_get_description(dev);
    props->type        = ggml_backend_cpu_device_get_type(dev);
    ggml_backend_cpu_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = {
2024-10-07 15:27:51 +00:00
        /* .async                = */ false,
        /* .host_buffer          = */ false,
        /* .buffer_from_host_ptr = */ true,
        /* .events               = */ false,
2024-10-02 23:49:47 +00:00
    };
}

static ggml_backend_t ggml_backend_cpu_device_init(ggml_backend_dev_t dev, const char * params) {
2023-12-07 20:26:54 +00:00
    return ggml_backend_cpu_init();
2024-10-02 23:49:47 +00:00
    GGML_UNUSED(dev);
2023-12-07 20:26:54 +00:00
    GGML_UNUSED(params);
2024-10-02 23:49:47 +00:00
}

static ggml_backend_buffer_type_t ggml_backend_cpu_device_get_buffer_type(ggml_backend_dev_t dev) {
    return ggml_backend_cpu_buffer_type();

    GGML_UNUSED(dev);
}

static ggml_backend_buffer_t ggml_backend_cpu_device_buffer_from_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
    return ggml_backend_cpu_buffer_from_ptr(ptr, size);

    GGML_UNUSED(dev);
    GGML_UNUSED(max_tensor_size);
}

static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_CPY:
            return
                op->type != GGML_TYPE_IQ2_XXS &&
                op->type != GGML_TYPE_IQ2_XS  &&
                op->type != GGML_TYPE_IQ1_S   &&
                op->type != GGML_TYPE_IQ1_M; // missing type_traits.from_float
        case GGML_OP_MUL_MAT:
2024-10-08 12:21:43 +00:00
            return op->src[1]->type == GGML_TYPE_F32 || op->src[1]->type == ggml_get_type_traits(op->src[0]->type)->vec_dot_type;
2024-10-02 23:49:47 +00:00
        case GGML_OP_ROPE_BACK:
            return op->src[2] == NULL && (op->op_params[2] & 4) == 0;
        case GGML_OP_IM2COL_BACK:
            return op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32;
        case GGML_OP_OUT_PROD:
            return (op->src[0]->type == GGML_TYPE_F32 || ggml_is_quantized(op->src[0]->type)) && op->src[1]->type == GGML_TYPE_F32;
        default:
            return true;
    }

    GGML_UNUSED(dev);
}
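// Usage sketch (illustrative only): a scheduler-style check through the public device
// wrapper before assigning an op to this device; `dev` and `node` are assumed to come
// from the caller.
//
//     if (ggml_backend_dev_supports_op(dev, node)) {
//         // safe to run `node` on the CPU device
//     }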
static bool ggml_backend_cpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    return ggml_backend_buft_is_host(buft);

    GGML_UNUSED(dev);
}

static const struct ggml_backend_device_i ggml_backend_cpu_device_i = {
    /* .get_name             = */ ggml_backend_cpu_device_get_name,
    /* .get_description      = */ ggml_backend_cpu_device_get_description,
    /* .get_memory           = */ ggml_backend_cpu_device_get_memory,
    /* .get_type             = */ ggml_backend_cpu_device_get_type,
    /* .get_props            = */ ggml_backend_cpu_device_get_props,
    /* .init_backend         = */ ggml_backend_cpu_device_init,
    /* .get_buffer_type      = */ ggml_backend_cpu_device_get_buffer_type,
    /* .get_host_buffer_type = */ NULL,
    /* .buffer_from_host_ptr = */ ggml_backend_cpu_device_buffer_from_ptr,
    /* .supports_op          = */ ggml_backend_cpu_device_supports_op,
    /* .supports_buft        = */ ggml_backend_cpu_device_supports_buft,
    /* .offload_op           = */ NULL,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};
////////////////////////
static const char * ggml_backend_cpu_reg_get_name(ggml_backend_reg_t reg) {
    return "CPU";

    GGML_UNUSED(reg);
}

static size_t ggml_backend_cpu_reg_get_device_count(ggml_backend_reg_t reg) {
    return 1;

    GGML_UNUSED(reg);
}

static ggml_backend_dev_t ggml_backend_cpu_reg_get_device(ggml_backend_reg_t reg, size_t index) {
    GGML_ASSERT(index == 0);
2024-10-03 15:39:18 +00:00
    static ggml_backend_cpu_device_context ctx;
2024-10-02 23:49:47 +00:00
    static ggml_backend_device ggml_backend_cpu_device = {
        /* .iface   = */ ggml_backend_cpu_device_i,
        /* .reg     = */ reg,
2024-10-03 15:39:18 +00:00
        /* .context = */ &ctx,
2024-10-02 23:49:47 +00:00
    };

    return &ggml_backend_cpu_device;
2024-10-07 19:55:08 +00:00
}
static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (strcmp(name, "ggml_backend_set_n_threads") == 0) {
        return (void *) ggml_backend_cpu_set_n_threads;
    }
    return NULL;
2024-10-02 23:49:47 +00:00
    GGML_UNUSED(reg);
}
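// Usage sketch (illustrative only): resolving the optional entry point by name, as a
// backend-agnostic caller would, instead of calling the CPU-specific setter directly.
// The function-pointer typedef is local to this example.
//
//     typedef void (*set_n_threads_fn)(ggml_backend_t, int);
//     set_n_threads_fn fn = (set_n_threads_fn)
//         ggml_backend_reg_get_proc_address(ggml_backend_cpu_reg(), "ggml_backend_set_n_threads");
//     if (fn) {
//         fn(backend, 8); // 8 threads, arbitrary example value
//     }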
static const struct ggml_backend_reg_i ggml_backend_cpu_reg_i = {
    /* .get_name         = */ ggml_backend_cpu_reg_get_name,
    /* .get_device_count = */ ggml_backend_cpu_reg_get_device_count,
    /* .get_device       = */ ggml_backend_cpu_reg_get_device,
2024-10-07 19:55:08 +00:00
    /* .get_proc_address = */ ggml_backend_cpu_get_proc_address,
2024-10-02 23:49:47 +00:00
};

ggml_backend_reg_t ggml_backend_cpu_reg(void) {
    static struct ggml_backend_reg ggml_backend_cpu_reg = {
        /* .iface   = */ ggml_backend_cpu_reg_i,
        /* .context = */ NULL,
    };

    return &ggml_backend_cpu_reg;
2023-10-08 17:19:14 +00:00
}
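// Usage sketch (illustrative only): enumerating the devices exposed by this registry and
// printing their reported name and description via the public device wrappers.
//
//     ggml_backend_reg_t reg = ggml_backend_cpu_reg();
//     for (size_t i = 0; i < ggml_backend_reg_dev_count(reg); i++) {
//         ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, i);
//         printf("%s: %s\n", ggml_backend_dev_name(dev), ggml_backend_dev_description(dev));
//     }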
2023-11-13 12:16:23 +00:00
2024-01-28 17:03:59 +00:00
// multi-buffer buffer

struct ggml_backend_multi_buffer_context {
    ggml_backend_buffer_t * buffers;
    size_t n_buffers;
};
2024-10-02 23:49:47 +00:00
static const char * ggml_backend_multi_buffer_get_name(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
2024-01-28 17:03:59 +00:00
    return ctx->buffers[0]->iface.get_name(ctx->buffers[0]);
}
2024-10-02 23:49:47 +00:00
static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
2024-01-28 17:03:59 +00:00
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_free(ctx->buffers[i]);
    }

    free(ctx->buffers);
    free(ctx);
}
2024-10-02 23:49:47 +00:00
static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
2024-01-28 17:03:59 +00:00
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_clear(ctx->buffers[i], value);
    }
}
static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
    /* .get_name        = */ ggml_backend_multi_buffer_get_name,
    /* .free_buffer     = */ ggml_backend_multi_buffer_free_buffer,
    /* .get_base        = */ NULL,
    /* .init_tensor     = */ NULL,
    /* .memset_tensor   = */ NULL,
    /* .set_tensor      = */ NULL,
    /* .get_tensor      = */ NULL,
    /* .cpy_tensor      = */ NULL,
    /* .clear           = */ ggml_backend_multi_buffer_clear,
    /* .reset           = */ NULL,
};
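// Note: most of the callbacks above are intentionally NULL. A multi buffer is only a
// container that aggregates other buffers; per-tensor operations are expected to go
// through the individual child buffers, so only get_name, free_buffer and clear are
// wired up here, and clear (defined above) simply forwards the call to every child.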
ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
    ctx->n_buffers = n_buffers;
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));

    GGML_ASSERT(ctx->buffers != NULL);
    size_t total_size = 0;
    for (size_t i = 0; i < n_buffers; i++) {
        ctx->buffers[i] = buffers[i];
        total_size += ggml_backend_buffer_get_size(buffers[i]);
    }

    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
}
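// Illustrative usage sketch (not part of the original file; buf_a and buf_b stand for
// backend buffers allocated elsewhere): the multi buffer simply wraps the parts and
// reports their combined size.
//
//   ggml_backend_buffer_t parts[2] = { buf_a, buf_b };
//   ggml_backend_buffer_t multi    = ggml_backend_multi_buffer_alloc_buffer(parts, 2);
//   size_t total = ggml_backend_buffer_get_size(multi); // sum of the two part sizes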
bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
    return buffer->iface.get_name == ggml_backend_multi_buffer_get_name;
}
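// Design note: a multi buffer is identified by comparing the get_name function pointer
// rather than a dedicated type tag; every buffer created through
// ggml_backend_multi_buffer_alloc_buffer shares the same interface struct, so the
// pointer comparison is sufficient.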
void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));

    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
    }
}
// creates a copy of the tensor with the same memory layout
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        dup->nb[i] = tensor->nb[i];
    }
    return dup;
}
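// Illustrative sketch (assumes a ggml_context * ctx and a tensor t set up elsewhere):
// ggml_dup_tensor alone would give the duplicate the default contiguous strides, so for a
// non-contiguous source such as a transposed view, copying nb[] preserves the layout:
//
//   struct ggml_tensor * t_t = ggml_transpose(ctx, t);           // non-contiguous view
//   struct ggml_tensor * dup = ggml_dup_tensor_layout(ctx, t_t); // same shape, same nb[] strides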
static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}
// scheduler
#ifndef GGML_SCHED_MAX_BACKENDS
#define GGML_SCHED_MAX_BACKENDS 16
#endif

#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS GGML_MAX_SRC
#endif

#ifndef GGML_SCHED_MAX_COPIES
#define GGML_SCHED_MAX_COPIES 4
#endif
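// Note: the #ifndef guards above make these limits build-time configurable; for example,
// defining GGML_SCHED_MAX_COPIES=2 on the compiler command line (a hypothetical override)
// shrinks the per-backend event table below, while the default of 4 leaves headroom for
// pipeline parallelism.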
struct ggml_backend_sched_split {
    int backend_id;
    int i_start;
    int i_end;
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_inputs;
    // graph view of this split
    struct ggml_cgraph graph;
};
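// Illustrative example (hypothetical numbers): if nodes 0..3 of a graph are assigned to
// backend 0 and nodes 4..9 to backend 1, the scheduler records two splits along the lines
// of { backend_id = 0, i_start = 0, i_end = 4 } and { backend_id = 1, i_start = 4, i_end = 10 };
// tensors produced by one split and consumed by the next are listed in inputs[]/n_inputs so
// they can be copied to the consuming backend before that split runs.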
struct ggml_backend_sched {
    bool is_reset; // true if the scheduler has been reset since the last graph split
    bool is_alloc;

    int n_backends;

    ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
    ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
    ggml_gallocr_t galloc;

    // hash map of the nodes in the graph
    struct ggml_hash_set hash_set;
    int * hv_tensor_backend_ids;            // [hash_set.size]
    struct ggml_tensor ** hv_tensor_copies; // [hash_set.size][n_backends][n_copies]

    int * node_backend_ids; // [graph_size]
    int * leaf_backend_ids; // [graph_size]

    int * prev_node_backend_ids; // [graph_size]
    int * prev_leaf_backend_ids; // [graph_size]

    // copy of the graph with modified inputs
    struct ggml_cgraph graph;

    // graph splits
    struct ggml_backend_sched_split * splits;
    int n_splits;
    int splits_capacity;

    // pipeline parallelism support
    int n_copies;
    int cur_copy;
    ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
    struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_graph_inputs;

    struct ggml_context * ctx;

    ggml_backend_sched_eval_callback callback_eval;
    void * callback_eval_user_data;

    char * context_buffer;
    size_t context_buffer_size;

    bool debug;
};
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
#define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)
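// The copies table is a flat array indexed as [hash id][backend][copy]; for example, with
// sched->n_backends == 2 and sched->n_copies == 2, tensor_id_copy(3, 1, 0) expands to
// sched->hv_tensor_copies[3*2*2 + 1*2 + 0], i.e. element 14 of the flat array.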
// returns the priority of the backend, lower id is higher priority
static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->backends[i] == backend) {
            return i;
        }
    }
    return -1;
}
2024-06-13 01:11:35 +00:00
static int ggml_backend_sched_backend_from_buffer ( ggml_backend_sched_t sched , const struct ggml_tensor * tensor , const struct ggml_tensor * op ) {
2024-03-13 17:54:21 +00:00
ggml_backend_buffer_t buffer = tensor - > buffer ;
2023-12-07 20:26:54 +00:00
if ( buffer = = NULL ) {
2024-02-12 07:16:06 +00:00
return - 1 ;
2024-01-12 19:07:38 +00:00
}
2024-06-13 01:11:35 +00:00
// find highest prio backend that supports the buffer type and the op
2023-12-07 20:26:54 +00:00
for ( int i = 0 ; i < sched - > n_backends ; i + + ) {
2024-06-13 01:11:35 +00:00
if ( ggml_backend_supports_buft ( sched - > backends [ i ] , buffer - > buft ) & &
ggml_backend_supports_op ( sched - > backends [ i ] , op ) ) {
2024-02-12 07:16:06 +00:00
return i ;
2023-12-07 20:26:54 +00:00
}
}
2024-03-13 17:54:21 +00:00
2024-06-13 01:11:35 +00:00
# ifndef NDEBUG
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " %s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied \n " ,
2024-06-13 01:11:35 +00:00
__func__ , ggml_op_desc ( tensor ) , ggml_backend_buffer_name ( buffer ) , tensor - > name ) ;
# endif
2024-03-13 17:54:21 +00:00
return - 1 ;
2023-12-07 20:26:54 +00:00
}
#if 0
2024-08-16 02:22:55 +00:00
# define GGML_SCHED_MAX_SPLITS_DEBUG 4096
static char causes [ GGML_DEFAULT_GRAPH_SIZE * 16 + GGML_SCHED_MAX_SPLITS_DEBUG * GGML_SCHED_MAX_SPLIT_INPUTS ] [ 128 ] ; // debug only
2023-12-07 20:26:54 +00:00
# define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
# define GET_CAUSE(node) causes[hash_id(node)]
# else
# define SET_CAUSE(node, ...)
# define GET_CAUSE(node) ""
# endif
2023-11-13 12:16:23 +00:00
// returns the backend that should be used for the node based on the current locations of its data and sources
2024-02-12 07:16:06 +00:00
static int ggml_backend_sched_backend_id_from_cur ( ggml_backend_sched_t sched , struct ggml_tensor * tensor ) {
// TODO: use supports_op to check if the backend supports the op
2024-01-12 19:07:38 +00:00
// assign pre-allocated nodes to their backend
2024-06-13 01:11:35 +00:00
int cur_backend_id = ggml_backend_sched_backend_from_buffer ( sched , tensor , tensor ) ;
2024-03-18 10:03:04 +00:00
if ( cur_backend_id ! = - 1 ) {
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor , " 1.dst " ) ;
2024-03-18 10:03:04 +00:00
return cur_backend_id ;
2023-11-13 12:16:23 +00:00
}
2024-03-13 17:54:21 +00:00
2023-11-13 12:16:23 +00:00
// view_src
2024-02-12 07:16:06 +00:00
if ( tensor - > view_src ! = NULL ) {
2024-06-13 01:11:35 +00:00
cur_backend_id = ggml_backend_sched_backend_from_buffer ( sched , tensor - > view_src , tensor ) ;
2024-03-18 10:03:04 +00:00
if ( cur_backend_id ! = - 1 ) {
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor , " 1.vsrc " ) ;
2024-03-18 10:03:04 +00:00
return cur_backend_id ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
}
2024-03-13 17:54:21 +00:00
2024-09-05 09:13:11 +00:00
if ( tensor - > buffer | | ( tensor - > view_src & & tensor - > view_src - > buffer ) ) {
// since the tensor is pre-allocated, it cannot be moved to another backend
GGML_ABORT ( " pre-allocated tensor in a backend that cannot run the operation " ) ;
}
2024-03-18 10:03:04 +00:00
// graph input
2024-03-13 17:54:21 +00:00
if ( tensor - > flags & GGML_TENSOR_FLAG_INPUT ) {
2024-03-18 10:03:04 +00:00
cur_backend_id = sched - > n_backends - 1 ; // last backend (assumed CPU)
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor , " 1.inp " ) ;
2024-03-18 10:03:04 +00:00
return cur_backend_id ;
2024-03-13 17:54:21 +00:00
}
2024-03-18 10:03:04 +00:00
// operations with weights are preferably run on the same backend as the weights
2023-11-13 12:16:23 +00:00
for ( int i = 0 ; i < GGML_MAX_SRC ; i + + ) {
2024-02-12 07:16:06 +00:00
const struct ggml_tensor * src = tensor - > src [ i ] ;
2023-11-13 12:16:23 +00:00
if ( src = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2023-11-13 12:16:23 +00:00
}
2024-01-12 19:07:38 +00:00
if ( src - > buffer ! = NULL & & src - > buffer - > usage = = GGML_BACKEND_BUFFER_USAGE_WEIGHTS ) {
2024-06-13 01:11:35 +00:00
int src_backend_id = ggml_backend_sched_backend_from_buffer ( sched , src , tensor ) ;
2024-03-18 10:03:04 +00:00
// check if a backend with higher prio wants to offload the op
if ( src_backend_id = = sched - > n_backends - 1 ) {
for ( int b = 0 ; b < src_backend_id ; b + + ) {
2024-06-17 14:51:42 +00:00
if ( ggml_backend_supports_op ( sched - > backends [ b ] , tensor ) & & ggml_backend_offload_op ( sched - > backends [ b ] , tensor ) ) {
2024-03-18 10:03:04 +00:00
SET_CAUSE ( tensor , " 1.off " ) ;
return b ;
}
}
}
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor , " 1.wgt%d " , i ) ;
2024-03-18 10:03:04 +00:00
return src_backend_id ;
2023-11-13 12:16:23 +00:00
}
}
2024-01-12 19:07:38 +00:00
2024-02-12 07:16:06 +00:00
return - 1 ;
2023-11-13 12:16:23 +00:00
}
static char * fmt_size ( size_t size ) {
static char buffer [ 128 ] ;
if ( size > = 1024 * 1024 ) {
2024-04-25 14:24:07 +00:00
snprintf ( buffer , sizeof ( buffer ) , " %zuM " , size / 1024 / 1024 ) ;
2023-11-13 12:16:23 +00:00
} else {
2024-04-25 14:24:07 +00:00
snprintf ( buffer , sizeof ( buffer ) , " %zuK " , size / 1024 ) ;
2023-11-13 12:16:23 +00:00
}
return buffer ;
}
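// note: fmt_size returns a pointer to a static buffer, so two calls in the same argument list
// would overwrite each other; e.g. fmt_size(3*1024*1024) yields "3M" and fmt_size(512*1024) yields "512K"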
2024-02-12 07:16:06 +00:00
static void ggml_backend_sched_print_assignments ( ggml_backend_sched_t sched , struct ggml_cgraph * graph ) {
2023-11-13 12:16:23 +00:00
int cur_split = 0 ;
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
if ( cur_split < sched - > n_splits & & i = = sched - > splits [ cur_split ] . i_start ) {
2024-02-12 07:16:06 +00:00
ggml_backend_t split_backend = sched - > backends [ sched - > splits [ cur_split ] . backend_id ] ;
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " \n ## SPLIT #%d: %s # %d inputs: " , cur_split , ggml_backend_name ( split_backend ) ,
2023-12-07 20:26:54 +00:00
sched - > splits [ cur_split ] . n_inputs ) ;
2023-11-13 12:16:23 +00:00
for ( int j = 0 ; j < sched - > splits [ cur_split ] . n_inputs ; j + + ) {
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " [%s (%5.5s)] " , sched - > splits [ cur_split ] . inputs [ j ] - > name ,
2023-12-07 20:26:54 +00:00
fmt_size ( ggml_nbytes ( sched - > splits [ cur_split ] . inputs [ j ] ) ) ) ;
2023-11-13 12:16:23 +00:00
}
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " \n " ) ;
2023-11-13 12:16:23 +00:00
cur_split + + ;
}
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2024-03-13 17:54:21 +00:00
ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend ( sched , node ) ;
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]: " , i , ggml_op_name ( node - > op ) , node - > name ,
2024-02-12 07:16:06 +00:00
fmt_size ( ggml_nbytes ( node ) ) , tensor_backend ? ggml_backend_name ( tensor_backend ) : " NULL " , GET_CAUSE ( node ) ) ;
2023-11-13 12:16:23 +00:00
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2023-11-13 12:16:23 +00:00
}
2024-03-13 17:54:21 +00:00
ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend ( sched , src ) ;
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " %20.20s (%5.5s) [%5.5s %8.8s] " , src - > name ,
2023-12-07 20:26:54 +00:00
fmt_size ( ggml_nbytes ( src ) ) , src_backend ? ggml_backend_name ( src_backend ) : " NULL " , GET_CAUSE ( src ) ) ;
2023-11-13 12:16:23 +00:00
}
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " \n " ) ;
2023-11-13 12:16:23 +00:00
}
}
2024-06-13 01:11:35 +00:00
static bool ggml_backend_sched_buffer_supported ( ggml_backend_sched_t sched , struct ggml_tensor * t , int backend_id ) {
ggml_backend_buffer_t buf = t - > view_src ? t - > view_src - > buffer : t - > buffer ;
ggml_backend_buffer_type_t buft = NULL ;
if ( buf ) {
// the tensor is already allocated
buft = buf - > buft ;
} else {
// see if the tensor already has a backend assigned, and use the buffer type of that backend
int tensor_backend_id = tensor_backend_id ( t ) ;
if ( tensor_backend_id = = - 1 & & t - > view_src ) {
tensor_backend_id = tensor_backend_id ( t - > view_src ) ;
}
if ( tensor_backend_id ! = - 1 ) {
buft = sched - > bufts [ tensor_backend_id ] ;
}
}
return buft ! = NULL & & ggml_backend_supports_buft ( sched - > backends [ backend_id ] , buft ) ;
}
static void ggml_backend_sched_set_if_supported ( ggml_backend_sched_t sched , struct ggml_tensor * node , int cur_backend_id , int * node_backend_id ) {
if ( ggml_backend_supports_op ( sched - > backends [ cur_backend_id ] , node ) ) {
* node_backend_id = cur_backend_id ;
SET_CAUSE ( node , " 2.sup " ) ;
}
}
2024-01-12 19:07:38 +00:00
2023-11-13 12:16:23 +00:00
// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
2024-02-12 07:16:06 +00:00
static void ggml_backend_sched_split_graph ( ggml_backend_sched_t sched , struct ggml_cgraph * graph ) {
2024-01-12 19:07:38 +00:00
// reset splits
2023-11-13 12:16:23 +00:00
sched - > n_splits = 0 ;
2024-03-13 17:54:21 +00:00
sched - > n_graph_inputs = 0 ;
2024-01-12 19:07:38 +00:00
sched - > is_reset = false ;
2023-11-13 12:16:23 +00:00
struct ggml_init_params params = {
2024-07-27 02:41:55 +00:00
/* .mem_size = */ sched - > context_buffer_size ,
2023-12-07 20:26:54 +00:00
/* .mem_buffer = */ sched - > context_buffer ,
/* .no_alloc = */ true
2023-11-13 12:16:23 +00:00
} ;
2024-01-12 19:07:38 +00:00
ggml_free ( sched - > ctx ) ;
2023-11-13 12:16:23 +00:00
sched - > ctx = ggml_init ( params ) ;
2024-01-12 19:07:38 +00:00
if ( sched - > ctx = = NULL ) {
2024-07-27 02:41:55 +00:00
GGML_ABORT ( " %s: failed to initialize context \n " , __func__ ) ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
2024-01-12 19:07:38 +00:00
// pass 1: assign backends to ops with pre-allocated inputs
2023-11-13 12:16:23 +00:00
for ( int i = 0 ; i < graph - > n_leafs ; i + + ) {
struct ggml_tensor * leaf = graph - > leafs [ i ] ;
2024-03-18 10:03:04 +00:00
int * leaf_backend_id = & tensor_backend_id ( leaf ) ;
2024-07-27 02:41:55 +00:00
// do not overwrite user assignments
if ( * leaf_backend_id = = - 1 ) {
* leaf_backend_id = ggml_backend_sched_backend_id_from_cur ( sched , leaf ) ;
2023-11-13 12:16:23 +00:00
}
}
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
2024-03-18 10:03:04 +00:00
int * node_backend_id = & tensor_backend_id ( node ) ;
2024-07-27 02:41:55 +00:00
// do not overwrite user assignments
if ( * node_backend_id = = - 1 ) {
* node_backend_id = ggml_backend_sched_backend_id_from_cur ( sched , node ) ;
#if 0
// src
if ( node - > op = = GGML_OP_NONE ) {
2024-02-17 21:04:16 +00:00
continue ;
2024-01-12 19:07:38 +00:00
}
2024-07-27 02:41:55 +00:00
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
continue ;
}
int * src_backend_id = & tensor_backend_id ( src ) ;
if ( * src_backend_id = = - 1 ) {
* src_backend_id = ggml_backend_sched_backend_id_from_cur ( sched , src ) ;
}
2024-01-12 19:07:38 +00:00
}
2024-07-27 02:41:55 +00:00
# endif
2023-11-13 12:16:23 +00:00
}
}
2024-01-12 19:07:38 +00:00
// pass 2: expand current backend assignments
// assign the same backend to adjacent nodes
// expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
// thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
2024-06-13 01:11:35 +00:00
// ops unsupported by the backend being expanded will be left unassigned, so that they can be assigned later when the locations of their inputs are known
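// illustrative sketch (made-up data), assuming backend 0 is a GPU and the last backend is the CPU:
//   after pass 1:                          [GPU,  -1,  -1, CPU,  -1, GPU]
//   after "expand gpu down" (left->right): [GPU, GPU, GPU, CPU,  -1, GPU]  (the CPU node resets the current backend)
//   after "expand gpu up"   (right->left): [GPU, GPU, GPU, CPU, GPU, GPU]  (node 4 is picked up from node 5)
// ops not supported by the backend being propagated are skipped by ggml_backend_sched_set_if_supported and stay -1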
// expand gpu down
2024-01-12 19:07:38 +00:00
{
2024-02-12 07:16:06 +00:00
int cur_backend_id = - 1 ;
2024-03-13 17:54:21 +00:00
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
2024-01-12 19:07:38 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2024-03-18 10:03:04 +00:00
int * node_backend_id = & tensor_backend_id ( node ) ;
if ( * node_backend_id ! = - 1 ) {
if ( * node_backend_id = = sched - > n_backends - 1 ) {
2024-01-12 19:07:38 +00:00
// skip cpu (lowest prio backend)
2024-02-12 07:16:06 +00:00
cur_backend_id = - 1 ;
2024-01-12 19:07:38 +00:00
} else {
2024-03-18 10:03:04 +00:00
cur_backend_id = * node_backend_id ;
2023-11-13 12:16:23 +00:00
}
2024-06-13 01:11:35 +00:00
} else if ( cur_backend_id ! = - 1 ) {
ggml_backend_sched_set_if_supported ( sched , node , cur_backend_id , node_backend_id ) ;
2024-01-12 19:07:38 +00:00
}
}
}
2024-06-13 01:11:35 +00:00
// expand gpu up
2024-01-12 19:07:38 +00:00
{
2024-02-12 07:16:06 +00:00
int cur_backend_id = - 1 ;
2024-03-13 17:54:21 +00:00
for ( int i = graph - > n_nodes - 1 ; i > = 0 ; i - - ) {
2024-01-12 19:07:38 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2024-03-18 10:03:04 +00:00
int * node_backend_id = & tensor_backend_id ( node ) ;
if ( * node_backend_id ! = - 1 ) {
if ( * node_backend_id = = sched - > n_backends - 1 ) {
2024-01-12 19:07:38 +00:00
// skip cpu (lowest prio backend)
2024-02-12 07:16:06 +00:00
cur_backend_id = - 1 ;
2024-01-12 19:07:38 +00:00
} else {
2024-03-18 10:03:04 +00:00
cur_backend_id = * node_backend_id ;
2023-11-13 12:16:23 +00:00
}
2024-06-13 01:11:35 +00:00
} else if ( cur_backend_id ! = - 1 ) {
ggml_backend_sched_set_if_supported ( sched , node , cur_backend_id , node_backend_id ) ;
2023-11-13 12:16:23 +00:00
}
2024-01-12 19:07:38 +00:00
}
}
2024-06-13 01:11:35 +00:00
// expand rest down
2024-01-12 19:07:38 +00:00
{
2024-02-12 07:16:06 +00:00
int cur_backend_id = - 1 ;
2024-03-13 17:54:21 +00:00
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
2024-01-12 19:07:38 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2024-03-18 10:03:04 +00:00
int * node_backend_id = & tensor_backend_id ( node ) ;
if ( * node_backend_id ! = - 1 ) {
cur_backend_id = * node_backend_id ;
2024-06-13 01:11:35 +00:00
} else if ( cur_backend_id ! = - 1 ) {
ggml_backend_sched_set_if_supported ( sched , node , cur_backend_id , node_backend_id ) ;
2023-11-13 12:16:23 +00:00
}
}
}
2024-06-13 01:11:35 +00:00
// expand rest up
2024-01-12 19:38:34 +00:00
{
2024-02-12 07:16:06 +00:00
int cur_backend_id = - 1 ;
2024-03-13 17:54:21 +00:00
for ( int i = graph - > n_nodes - 1 ; i > = 0 ; i - - ) {
2024-01-12 19:38:34 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2024-03-18 10:03:04 +00:00
int * node_backend_id = & tensor_backend_id ( node ) ;
if ( * node_backend_id ! = - 1 ) {
cur_backend_id = * node_backend_id ;
2024-06-13 01:11:35 +00:00
} else if ( cur_backend_id ! = - 1 ) {
ggml_backend_sched_set_if_supported ( sched , node , cur_backend_id , node_backend_id ) ;
2024-01-12 19:38:34 +00:00
}
}
}
2024-03-13 17:54:21 +00:00
2024-06-13 01:11:35 +00:00
// pass 3: upgrade nodes to higher prio backends with compatible buffer types
// if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
// however, we also need to verify that the sources are in compatible buffer types
// (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
// however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
// this is not uncommon, since multiple backends can use host memory with the same buffer type (e.g. BLAS and CPU)
// additionally, set remaining unassigned nodes to the backend with the most supported inputs
// only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
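// example of the upgrade case: a node assigned to the CPU backend whose sources live in a host buffer
// type that a higher-prio BLAS backend also uses can be moved to the BLAS backend, since both share
// the same buffer type and no extra copies are required ("3.upg")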
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
int * node_backend_id = & tensor_backend_id ( node ) ;
if ( * node_backend_id = = - 1 ) {
// unassigned node: find the backend with the most supported inputs
int n_supported_best = - 1 ;
for ( int b = 0 ; b < sched - > n_backends ; b + + ) {
if ( ggml_backend_supports_op ( sched - > backends [ b ] , node ) ) {
int n_supported = 0 ;
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
continue ;
}
if ( ( tensor_backend_id ( src ) ! = - 1 | | tensor_backend_id ( src - > view_src ) ! = - 1 ) & & ggml_backend_sched_buffer_supported ( sched , src , b ) ) {
n_supported + + ;
}
}
if ( n_supported > n_supported_best ) {
n_supported_best = n_supported ;
* node_backend_id = b ;
SET_CAUSE ( node , " 3.best " ) ;
}
}
}
} else {
// assigned node: upgrade to higher prio backend if possible
for ( int b = 0 ; b < * node_backend_id ; b + + ) {
if ( sched - > bufts [ b ] = = sched - > bufts [ * node_backend_id ] & & ggml_backend_supports_op ( sched - > backends [ b ] , node ) ) {
bool supported = true ;
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
continue ;
}
if ( ! ggml_backend_sched_buffer_supported ( sched , src , b ) ) {
supported = false ;
break ;
}
}
if ( supported ) {
* node_backend_id = b ;
SET_CAUSE ( node , " 3.upg " ) ;
break ;
}
}
}
}
}
2023-11-13 12:16:23 +00:00
2024-06-13 01:11:35 +00:00
// pass 4: assign backends to remaining src from dst and view_src
2023-11-13 12:16:23 +00:00
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
2024-03-18 10:03:04 +00:00
int * cur_backend_id = & tensor_backend_id ( node ) ;
if ( node - > view_src ! = NULL & & * cur_backend_id = = - 1 ) {
* cur_backend_id = tensor_backend_id ( node - > view_src ) ;
2024-06-13 01:11:35 +00:00
SET_CAUSE ( node , " 4.vsrc " ) ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2023-11-13 12:16:23 +00:00
}
2024-03-18 10:03:04 +00:00
int * src_backend_id = & tensor_backend_id ( src ) ;
if ( * src_backend_id = = - 1 ) {
2024-01-12 19:07:38 +00:00
if ( src - > view_src ! = NULL ) {
// views are always on the same backend as the source
2024-03-18 10:03:04 +00:00
* src_backend_id = tensor_backend_id ( src - > view_src ) ;
2024-06-13 01:11:35 +00:00
SET_CAUSE ( src , " 4.vsrc " ) ;
2024-01-12 19:07:38 +00:00
} else {
2024-03-18 10:03:04 +00:00
* src_backend_id = * cur_backend_id ;
2024-06-13 01:11:35 +00:00
SET_CAUSE ( src , " 4.cur " ) ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
}
}
}
2024-07-27 02:41:55 +00:00
// pass 5: split graph, find tensors that need to be copied
2024-01-12 19:07:38 +00:00
{
2024-03-18 10:03:04 +00:00
int i_split = 0 ;
struct ggml_backend_sched_split * split = & sched - > splits [ 0 ] ;
2024-01-12 19:07:38 +00:00
// find the backend of the first split, skipping view ops
2024-07-27 02:41:55 +00:00
int i = 0 ;
for ( ; i < graph - > n_nodes ; i + + ) {
2024-01-12 19:07:38 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ! ggml_is_view_op ( node - > op ) ) {
2024-03-18 10:03:04 +00:00
split - > backend_id = tensor_backend_id ( node ) ;
2024-01-12 19:07:38 +00:00
break ;
}
2023-11-13 12:16:23 +00:00
}
2024-03-18 10:03:04 +00:00
split - > i_start = 0 ;
split - > n_inputs = 0 ;
int cur_backend_id = split - > backend_id ;
2024-07-27 02:41:55 +00:00
for ( ; i < graph - > n_nodes ; i + + ) {
2024-01-12 19:07:38 +00:00
struct ggml_tensor * node = graph - > nodes [ i ] ;
if ( ggml_is_view_op ( node - > op ) ) {
continue ;
}
2023-11-13 12:16:23 +00:00
2024-03-18 10:03:04 +00:00
const int node_backend_id = tensor_backend_id ( node ) ;
2024-01-12 19:07:38 +00:00
2024-07-27 02:41:55 +00:00
assert ( node_backend_id ! = - 1 ) ; // all nodes should be assigned by now
2024-01-12 19:38:34 +00:00
2024-03-18 10:03:04 +00:00
// check if we should start a new split based on the sources of the current node
bool need_new_split = false ;
if ( node_backend_id = = cur_backend_id & & split - > n_inputs > 0 ) {
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
continue ;
}
// check if a weight is on a different backend
// by starting a new split, the memory of the previously offloaded weights can be reused
if ( src - > buffer ! = NULL & & src - > buffer - > usage = = GGML_BACKEND_BUFFER_USAGE_WEIGHTS ) {
int src_backend_id = tensor_backend_id ( src ) ;
2024-07-27 02:41:55 +00:00
if ( src_backend_id ! = cur_backend_id ) {
2024-03-18 10:03:04 +00:00
need_new_split = true ;
break ;
}
}
// check if the split has too many inputs
2024-06-13 01:11:35 +00:00
// FIXME: count the number of inputs instead of only checking when full
2024-03-18 10:03:04 +00:00
if ( split - > n_inputs = = GGML_SCHED_MAX_SPLIT_INPUTS ) {
const size_t id = hash_id ( src ) ;
2024-07-27 02:41:55 +00:00
int src_backend_id = sched - > hv_tensor_backend_ids [ id ] ;
2024-06-13 01:11:35 +00:00
bool supported = ggml_backend_sched_buffer_supported ( sched , src , cur_backend_id ) ;
2024-07-27 02:41:55 +00:00
if ( src_backend_id ! = cur_backend_id & & tensor_id_copy ( id , cur_backend_id , 0 ) = = NULL & & ! supported ) {
2024-03-18 10:03:04 +00:00
//printf("starting new split because of too many inputs: node %s, input %s\n", node->name, src->name);
need_new_split = true ;
break ;
}
}
}
}
if ( node_backend_id ! = cur_backend_id | | need_new_split ) {
split - > i_end = i ;
i_split + + ;
if ( i_split > = sched - > splits_capacity ) {
sched - > splits_capacity * = 2 ;
2024-10-02 23:49:47 +00:00
sched - > splits = ( ggml_backend_sched_split * )
realloc ( sched - > splits , sched - > splits_capacity * sizeof ( struct ggml_backend_sched_split ) ) ;
2024-03-18 10:03:04 +00:00
GGML_ASSERT ( sched - > splits ! = NULL ) ;
}
split = & sched - > splits [ i_split ] ;
split - > backend_id = node_backend_id ;
split - > i_start = i ;
split - > n_inputs = 0 ;
cur_backend_id = node_backend_id ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
2024-01-12 19:07:38 +00:00
// find inputs that are not on the same backend
for ( int j = 0 ; j < GGML_MAX_SRC ; j + + ) {
struct ggml_tensor * src = node - > src [ j ] ;
if ( src = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2024-01-12 19:07:38 +00:00
}
2024-03-13 17:54:21 +00:00
2024-07-27 02:41:55 +00:00
size_t src_id = hash_id ( src ) ;
const int src_backend_id = sched - > hv_tensor_backend_ids [ src_id ] ;
2024-02-12 07:16:06 +00:00
assert ( src_backend_id ! = - 1 ) ; // all inputs should be assigned by now
2024-03-13 17:54:21 +00:00
2024-06-13 01:11:35 +00:00
if ( src - > flags & GGML_TENSOR_FLAG_INPUT & & sched - > n_copies > 1 ) {
2024-07-27 02:41:55 +00:00
if ( tensor_id_copy ( src_id , src_backend_id , 0 ) = = NULL ) {
2024-03-13 17:54:21 +00:00
ggml_backend_t backend = sched - > backends [ src_backend_id ] ;
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
struct ggml_tensor * tensor_copy ;
if ( c = = sched - > cur_copy ) {
tensor_copy = src ; // use the original tensor as the current copy
} else {
tensor_copy = ggml_dup_tensor_layout ( sched - > ctx , src ) ;
ggml_format_name ( tensor_copy , " %s#%s#%d " , ggml_backend_name ( backend ) , src - > name , c ) ;
}
if ( sched - > n_copies > 1 ) {
ggml_set_input ( tensor_copy ) ;
ggml_set_output ( tensor_copy ) ; // prevent ggml-alloc from overwriting the tensor
}
2024-07-27 02:41:55 +00:00
tensor_id_copy ( src_id , src_backend_id , c ) = tensor_copy ;
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor_copy , " 5.cpy " ) ;
}
int n_graph_inputs = sched - > n_graph_inputs + + ;
GGML_ASSERT ( n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS ) ;
sched - > graph_inputs [ n_graph_inputs ] = src ;
}
}
2024-07-27 02:41:55 +00:00
if ( src_backend_id ! = cur_backend_id & & ! ggml_backend_sched_buffer_supported ( sched , src , cur_backend_id ) ) {
2024-01-20 15:05:49 +00:00
// create a copy of the input in the split's backend
2024-07-27 02:41:55 +00:00
if ( tensor_id_copy ( src_id , cur_backend_id , 0 ) = = NULL ) {
2024-02-12 07:16:06 +00:00
ggml_backend_t backend = sched - > backends [ cur_backend_id ] ;
2024-03-13 17:54:21 +00:00
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout ( sched - > ctx , src ) ;
ggml_format_name ( tensor_copy , " %s#%s#%d " , ggml_backend_name ( backend ) , src - > name , c ) ;
if ( sched - > n_copies > 1 ) {
ggml_set_input ( tensor_copy ) ;
ggml_set_output ( tensor_copy ) ; // prevent ggml-alloc from overwriting the tensor
}
2024-07-27 02:41:55 +00:00
tensor_id_copy ( src_id , cur_backend_id , c ) = tensor_copy ;
2024-03-13 17:54:21 +00:00
SET_CAUSE ( tensor_copy , " 5.cpy " ) ;
}
2024-03-18 10:03:04 +00:00
int n_inputs = split - > n_inputs + + ;
2024-03-13 17:54:21 +00:00
GGML_ASSERT ( n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS ) ;
2024-03-18 10:03:04 +00:00
split - > inputs [ n_inputs ] = src ;
2024-01-20 15:05:49 +00:00
}
2024-07-27 02:41:55 +00:00
node - > src [ j ] = tensor_id_copy ( src_id , cur_backend_id , sched - > cur_copy ) ;
2023-11-13 12:16:23 +00:00
}
}
}
2024-03-18 10:03:04 +00:00
split - > i_end = graph - > n_nodes ;
sched - > n_splits = i_split + 1 ;
2023-11-13 12:16:23 +00:00
}
2024-06-13 01:11:35 +00:00
if ( sched - > debug ) {
ggml_backend_sched_print_assignments ( sched , graph ) ;
}
2024-07-27 02:41:55 +00:00
// swap node_backend_ids and leaf_backend_ids with their prev_* counterparts
2024-06-13 01:11:35 +00:00
{
int * tmp = sched - > node_backend_ids ;
sched - > node_backend_ids = sched - > prev_node_backend_ids ;
sched - > prev_node_backend_ids = tmp ;
tmp = sched - > leaf_backend_ids ;
sched - > leaf_backend_ids = sched - > prev_leaf_backend_ids ;
sched - > prev_leaf_backend_ids = tmp ;
}
2023-11-13 12:16:23 +00:00
2024-10-02 23:49:47 +00:00
int graph_size = std : : max ( graph - > n_nodes , graph - > n_leafs ) + sched - > n_splits * GGML_SCHED_MAX_SPLIT_INPUTS * 2 * sched - > n_copies ;
2024-07-27 02:41:55 +00:00
if ( sched - > graph . size < graph_size ) {
sched - > graph . size = graph_size ;
2024-10-02 23:49:47 +00:00
sched - > graph . nodes = ( ggml_tensor * * ) realloc ( sched - > graph . nodes , graph_size * sizeof ( struct ggml_tensor * ) ) ;
sched - > graph . leafs = ( ggml_tensor * * ) realloc ( sched - > graph . leafs , graph_size * sizeof ( struct ggml_tensor * ) ) ;
2024-07-27 02:41:55 +00:00
GGML_ASSERT ( sched - > graph . nodes ! = NULL ) ;
GGML_ASSERT ( sched - > graph . leafs ! = NULL ) ;
}
sched - > graph . n_nodes = 0 ;
sched - > graph . n_leafs = 0 ;
struct ggml_cgraph * graph_copy = & sched - > graph ;
2023-11-13 12:16:23 +00:00
for ( int i = 0 ; i < sched - > n_splits ; i + + ) {
struct ggml_backend_sched_split * split = & sched - > splits [ i ] ;
2023-12-07 20:26:54 +00:00
split - > graph = ggml_graph_view ( graph , split - > i_start , split - > i_end ) ;
2023-11-13 12:16:23 +00:00
2024-03-13 17:54:21 +00:00
// add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
2023-11-13 12:16:23 +00:00
for ( int j = 0 ; j < split - > n_inputs ; j + + ) {
2024-03-18 10:03:04 +00:00
assert ( graph_copy - > size > ( graph_copy - > n_nodes + 1 ) ) ;
2023-11-13 12:16:23 +00:00
struct ggml_tensor * input = split - > inputs [ j ] ;
2024-03-18 10:03:04 +00:00
const size_t input_id = hash_id ( input ) ;
2024-07-27 02:41:55 +00:00
struct ggml_tensor * input_cpy = tensor_id_copy ( input_id , split - > backend_id , sched - > cur_copy ) ;
2024-02-12 07:16:06 +00:00
2024-01-12 19:07:38 +00:00
// add a dependency to the input source so that it is not freed before the copy is done
2024-02-12 07:16:06 +00:00
struct ggml_tensor * input_dep = ggml_view_tensor ( sched - > ctx , input ) ;
2024-03-13 17:54:21 +00:00
input_dep - > src [ 0 ] = input ;
2024-07-27 02:41:55 +00:00
sched - > node_backend_ids [ graph_copy - > n_nodes ] = sched - > hv_tensor_backend_ids [ input_id ] ;
2024-02-12 07:16:06 +00:00
graph_copy - > nodes [ graph_copy - > n_nodes + + ] = input_dep ;
// add a dependency to the input copy so that it is allocated at the start of the split
sched - > node_backend_ids [ graph_copy - > n_nodes ] = split - > backend_id ;
2023-11-13 12:16:23 +00:00
graph_copy - > nodes [ graph_copy - > n_nodes + + ] = input_cpy ;
}
for ( int j = split - > i_start ; j < split - > i_end ; j + + ) {
2024-03-18 10:03:04 +00:00
assert ( graph_copy - > size > graph_copy - > n_nodes ) ;
2024-02-12 07:16:06 +00:00
sched - > node_backend_ids [ graph_copy - > n_nodes ] = tensor_backend_id ( graph - > nodes [ j ] ) ;
2023-11-13 12:16:23 +00:00
graph_copy - > nodes [ graph_copy - > n_nodes + + ] = graph - > nodes [ j ] ;
}
}
2024-03-13 17:54:21 +00:00
if ( sched - > n_copies > 1 ) {
// add input copies as leafs so that they are allocated first
for ( int i = 0 ; i < sched - > n_graph_inputs ; i + + ) {
struct ggml_tensor * input = sched - > graph_inputs [ i ] ;
size_t id = hash_id ( input ) ;
int backend_id = tensor_backend_id ( input ) ;
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
2024-07-27 02:41:55 +00:00
struct ggml_tensor * input_cpy = tensor_id_copy ( id , backend_id , c ) ;
2024-03-13 17:54:21 +00:00
sched - > leaf_backend_ids [ graph_copy - > n_leafs ] = backend_id ;
2024-09-05 09:13:11 +00:00
assert ( graph_copy - > size > graph_copy - > n_leafs ) ;
2024-03-13 17:54:21 +00:00
graph_copy - > leafs [ graph_copy - > n_leafs + + ] = input_cpy ;
}
}
for ( int i = 0 ; i < sched - > n_splits ; i + + ) {
struct ggml_backend_sched_split * split = & sched - > splits [ i ] ;
int backend_id = split - > backend_id ;
for ( int j = 0 ; j < split - > n_inputs ; j + + ) {
struct ggml_tensor * input = split - > inputs [ j ] ;
size_t id = hash_id ( input ) ;
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
2024-07-27 02:41:55 +00:00
struct ggml_tensor * input_cpy = tensor_id_copy ( id , backend_id , c ) ;
2024-03-13 17:54:21 +00:00
sched - > leaf_backend_ids [ graph_copy - > n_leafs ] = backend_id ;
2024-09-05 09:13:11 +00:00
assert ( graph_copy - > size > graph_copy - > n_leafs ) ;
2024-03-13 17:54:21 +00:00
graph_copy - > leafs [ graph_copy - > n_leafs + + ] = input_cpy ;
}
}
}
}
// add leafs from the original graph
for ( int i = 0 ; i < graph - > n_leafs ; i + + ) {
struct ggml_tensor * leaf = graph - > leafs [ i ] ;
sched - > leaf_backend_ids [ graph_copy - > n_leafs ] = tensor_backend_id ( leaf ) ;
2024-09-05 09:13:11 +00:00
assert ( graph_copy - > size > graph_copy - > n_leafs ) ;
2024-03-13 17:54:21 +00:00
graph_copy - > leafs [ graph_copy - > n_leafs + + ] = leaf ;
}
2023-11-13 12:16:23 +00:00
}
2024-02-12 07:16:06 +00:00
static bool ggml_backend_sched_alloc_splits ( ggml_backend_sched_t sched ) {
2024-06-13 01:11:35 +00:00
bool backend_ids_changed = false ;
2024-07-27 02:41:55 +00:00
for ( int i = 0 ; i < sched - > graph . n_nodes ; i + + ) {
2024-06-18 06:37:20 +00:00
if ( sched - > node_backend_ids [ i ] ! = sched - > prev_node_backend_ids [ i ] & &
sched - > bufts [ sched - > node_backend_ids [ i ] ] ! = sched - > bufts [ sched - > prev_node_backend_ids [ i ] ] ) {
2024-06-13 01:11:35 +00:00
backend_ids_changed = true ;
break ;
}
}
if ( ! backend_ids_changed ) {
2024-07-27 02:41:55 +00:00
for ( int i = 0 ; i < sched - > graph . n_leafs ; i + + ) {
2024-06-18 06:37:20 +00:00
if ( sched - > leaf_backend_ids [ i ] ! = sched - > prev_leaf_backend_ids [ i ] & &
sched - > bufts [ sched - > leaf_backend_ids [ i ] ] ! = sched - > bufts [ sched - > prev_leaf_backend_ids [ i ] ] ) {
2024-06-13 01:11:35 +00:00
backend_ids_changed = true ;
break ;
}
}
}
2024-03-13 17:54:21 +00:00
// allocate graph
2024-07-27 02:41:55 +00:00
if ( backend_ids_changed | | ! ggml_gallocr_alloc_graph ( sched - > galloc , & sched - > graph ) ) {
2024-03-13 17:54:21 +00:00
// the re-allocation may cause the split inputs to be moved to a different address
ggml_backend_sched_synchronize ( sched ) ;
2024-02-12 07:16:06 +00:00
# ifndef NDEBUG
2024-10-11 13:34:45 +00:00
GGML_LOG_DEBUG ( " %s: failed to allocate graph, reserving (backend_ids_changed = %d) \n " , __func__ , backend_ids_changed ) ;
2024-02-12 07:16:06 +00:00
# endif
2024-07-27 02:41:55 +00:00
ggml_gallocr_reserve_n ( sched - > galloc , & sched - > graph , sched - > node_backend_ids , sched - > leaf_backend_ids ) ;
if ( ! ggml_gallocr_alloc_graph ( sched - > galloc , & sched - > graph ) ) {
2024-10-11 13:34:45 +00:00
GGML_LOG_ERROR ( " %s: failed to allocate graph \n " , __func__ ) ;
2024-02-12 07:16:06 +00:00
return false ;
}
}
return true ;
2023-11-13 12:16:23 +00:00
}
2024-03-04 09:05:42 +00:00
static enum ggml_status ggml_backend_sched_compute_splits ( ggml_backend_sched_t sched ) {
2023-11-13 12:16:23 +00:00
struct ggml_backend_sched_split * splits = sched - > splits ;
for ( int i = 0 ; i < sched - > n_splits ; i + + ) {
struct ggml_backend_sched_split * split = & splits [ i ] ;
2024-02-12 07:16:06 +00:00
int split_backend_id = split - > backend_id ;
ggml_backend_t split_backend = sched - > backends [ split_backend_id ] ;
2023-11-13 12:16:23 +00:00
// copy the input tensors to the split backend
for ( int j = 0 ; j < split - > n_inputs ; j + + ) {
2024-03-13 17:54:21 +00:00
ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend ( sched , split - > inputs [ j ] ) ;
2023-12-07 20:26:54 +00:00
struct ggml_tensor * input = split - > inputs [ j ] ;
2024-07-27 02:41:55 +00:00
struct ggml_tensor * input_cpy = tensor_copy ( input , split_backend_id , sched - > cur_copy ) ;
2024-01-12 19:07:38 +00:00
2024-03-13 17:54:21 +00:00
if ( input - > flags & GGML_TENSOR_FLAG_INPUT ) {
// inputs from the user must be copied immediately, to prevent the user from overwriting the data before the copy is done
if ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ! = NULL ) {
ggml_backend_event_synchronize ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ) ;
} else {
ggml_backend_synchronize ( split_backend ) ;
}
ggml_backend_tensor_copy ( input , input_cpy ) ;
} else {
2024-03-18 10:03:04 +00:00
// wait for the split backend to finish using the input before overwriting it
2024-03-13 17:54:21 +00:00
if ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ! = NULL ) {
ggml_backend_event_wait ( split_backend , sched - > events [ split_backend_id ] [ sched - > cur_copy ] ) ;
} else {
ggml_backend_synchronize ( split_backend ) ;
}
2024-08-07 11:29:02 +00:00
// try async copy first; if not supported, fall back to a sync copy without synchronizing the dst backend,
// since the synchronization is handled here with multiple copies and events
// TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
if ( ! split_backend - > iface . cpy_tensor_async | | ! split_backend - > iface . cpy_tensor_async ( input_backend , split_backend , input , input_cpy ) ) {
ggml_backend_synchronize ( input_backend ) ;
if ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ! = NULL ) {
ggml_backend_event_synchronize ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ) ;
} else {
ggml_backend_synchronize ( split_backend ) ;
}
ggml_backend_tensor_copy ( input , input_cpy ) ;
}
2024-03-13 17:54:21 +00:00
}
2023-11-13 12:16:23 +00:00
}
2024-01-17 16:39:41 +00:00
if ( ! sched - > callback_eval ) {
2024-03-13 17:54:21 +00:00
enum ggml_status ec = ggml_backend_graph_compute_async ( split_backend , & split - > graph ) ;
2024-03-04 09:05:42 +00:00
if ( ec ! = GGML_STATUS_SUCCESS ) {
return ec ;
2024-02-12 07:16:06 +00:00
}
2024-01-17 16:39:41 +00:00
} else {
// similar to ggml_backend_compare_graph_backend
for ( int j0 = 0 ; j0 < split - > graph . n_nodes ; j0 + + ) {
struct ggml_tensor * t = split - > graph . nodes [ j0 ] ;
// check if the user needs data from this node
bool need = sched - > callback_eval ( t , true , sched - > callback_eval_user_data ) ;
int j1 = j0 ;
// determine the range [j0, j1] of nodes that can be computed together
while ( ! need & & j1 < split - > graph . n_nodes - 1 ) {
t = split - > graph . nodes [ + + j1 ] ;
need = sched - > callback_eval ( t , true , sched - > callback_eval_user_data ) ;
}
struct ggml_cgraph gv = ggml_graph_view ( & split - > graph , j0 , j1 + 1 ) ;
2024-03-13 17:54:21 +00:00
enum ggml_status ec = ggml_backend_graph_compute_async ( split_backend , & gv ) ;
2024-03-04 09:05:42 +00:00
if ( ec ! = GGML_STATUS_SUCCESS ) {
return ec ;
2024-02-12 07:16:06 +00:00
}
2024-01-17 16:39:41 +00:00
2024-03-13 17:54:21 +00:00
// TODO: pass backend to the callback, then the user can decide if they want to synchronize
ggml_backend_synchronize ( split_backend ) ;
2024-01-17 16:39:41 +00:00
if ( need & & ! sched - > callback_eval ( t , false , sched - > callback_eval_user_data ) ) {
break ;
}
j0 = j1 ;
}
}
2023-11-13 12:16:23 +00:00
2024-03-13 17:54:21 +00:00
// record the event of this copy
if ( split - > n_inputs > 0 ) {
if ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] ! = NULL ) {
2024-10-02 23:49:47 +00:00
ggml_backend_event_record ( sched - > events [ split_backend_id ] [ sched - > cur_copy ] , split_backend ) ;
2024-03-13 17:54:21 +00:00
}
2023-11-13 12:16:23 +00:00
}
}
2024-03-13 17:54:21 +00:00
sched - > cur_copy = ( sched - > cur_copy + 1 ) % sched - > n_copies ;
2024-01-12 19:07:38 +00:00
2024-03-04 09:05:42 +00:00
return GGML_STATUS_SUCCESS ;
2023-11-13 12:16:23 +00:00
}
2024-03-13 17:54:21 +00:00
ggml_backend_sched_t ggml_backend_sched_new (
ggml_backend_t * backends ,
ggml_backend_buffer_type_t * bufts ,
int n_backends ,
size_t graph_size ,
bool parallel ) {
2024-01-12 19:07:38 +00:00
GGML_ASSERT ( n_backends > 0 ) ;
2024-03-13 17:54:21 +00:00
GGML_ASSERT ( n_backends < = GGML_SCHED_MAX_BACKENDS ) ;
GGML_ASSERT ( ggml_backend_is_cpu ( backends [ n_backends - 1 ] ) ) ; // last backend must be CPU
2023-11-13 12:16:23 +00:00
2024-10-02 23:49:47 +00:00
struct ggml_backend_sched * sched = ( ggml_backend_sched * ) calloc ( 1 , sizeof ( struct ggml_backend_sched ) ) ;
2024-01-12 19:07:38 +00:00
2024-06-13 01:11:35 +00:00
sched - > debug = getenv ( " GGML_SCHED_DEBUG " ) ! = NULL ;
2024-07-27 02:41:55 +00:00
sched - > n_backends = n_backends ;
sched - > n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1 ;
2024-06-13 01:11:35 +00:00
2024-01-12 19:07:38 +00:00
// initialize hash table
2024-07-27 02:41:55 +00:00
// FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
sched - > hash_set = ggml_hash_set_new ( graph_size ) ;
2024-10-02 23:49:47 +00:00
sched - > hv_tensor_backend_ids = ( int * ) malloc ( sched - > hash_set . size * sizeof ( sched - > hv_tensor_backend_ids [ 0 ] ) ) ;
sched - > hv_tensor_copies = ( ggml_tensor * * ) malloc ( sched - > hash_set . size * sched - > n_backends * sched - > n_copies * sizeof ( struct ggml_tensor * ) ) ;
2024-03-18 10:03:04 +00:00
2024-08-16 02:22:55 +00:00
const size_t ggml_sched_max_splits = graph_size ; // at most there is one split for each node in the graph
const size_t nodes_size = graph_size + ggml_sched_max_splits * GGML_SCHED_MAX_SPLIT_INPUTS * 2 ;
2024-10-02 23:49:47 +00:00
sched - > node_backend_ids = ( int * ) calloc ( nodes_size , sizeof ( sched - > node_backend_ids [ 0 ] ) ) ;
sched - > leaf_backend_ids = ( int * ) calloc ( nodes_size , sizeof ( sched - > leaf_backend_ids [ 0 ] ) ) ;
sched - > prev_node_backend_ids = ( int * ) calloc ( nodes_size , sizeof ( sched - > prev_node_backend_ids [ 0 ] ) ) ;
sched - > prev_leaf_backend_ids = ( int * ) calloc ( nodes_size , sizeof ( sched - > prev_leaf_backend_ids [ 0 ] ) ) ;
2023-11-13 12:16:23 +00:00
2024-08-16 02:22:55 +00:00
sched - > context_buffer_size = ggml_sched_max_splits * GGML_SCHED_MAX_SPLIT_INPUTS * 2 * sizeof ( struct ggml_tensor ) + ggml_graph_overhead_custom ( graph_size , false ) ;
2024-10-02 23:49:47 +00:00
sched - > context_buffer = ( char * ) malloc ( sched - > context_buffer_size ) ;
2024-03-13 17:54:21 +00:00
2024-03-18 10:03:04 +00:00
const int initial_splits_capacity = 16 ;
2024-10-02 23:49:47 +00:00
sched - > splits = ( ggml_backend_sched_split * ) calloc ( initial_splits_capacity , sizeof ( sched - > splits [ 0 ] ) ) ;
2024-03-18 10:03:04 +00:00
sched - > splits_capacity = initial_splits_capacity ;
2024-03-13 17:54:21 +00:00
for ( int b = 0 ; b < n_backends ; b + + ) {
sched - > backends [ b ] = backends [ b ] ;
sched - > bufts [ b ] = bufts ? bufts [ b ] : ggml_backend_get_default_buffer_type ( backends [ b ] ) ;
2024-06-13 01:11:35 +00:00
GGML_ASSERT ( ggml_backend_supports_buft ( backends [ b ] , sched - > bufts [ b ] ) ) ;
2024-10-18 05:46:16 +00:00
2024-03-13 17:54:21 +00:00
if ( sched - > n_copies > 1 ) {
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
2024-10-02 23:49:47 +00:00
sched - > events [ b ] [ c ] = ggml_backend_event_new ( backends [ b ] - > device ) ;
2024-03-13 17:54:21 +00:00
}
}
2023-11-13 12:16:23 +00:00
}
2024-02-12 07:16:06 +00:00
sched - > galloc = ggml_gallocr_new_n ( sched - > bufts , n_backends ) ;
2023-11-13 12:16:23 +00:00
2024-02-12 07:16:06 +00:00
ggml_backend_sched_reset ( sched ) ;
2024-01-12 19:07:38 +00:00
2023-11-13 12:16:23 +00:00
return sched ;
}
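// usage sketch (illustrative only; assumes a CPU-only setup and a graph that fits in GGML_DEFAULT_GRAPH_SIZE
// nodes; real applications typically pass their GPU backends first and the CPU backend last, and may pass
// custom buffer types instead of NULL):
#if 0
static void example_sched_usage(struct ggml_cgraph * graph) {
    ggml_backend_t backend_cpu = ggml_backend_cpu_init();
    ggml_backend_t backends[1] = { backend_cpu };

    // no custom buffer types, default graph size, no pipeline parallelism
    ggml_backend_sched_t sched = ggml_backend_sched_new(backends, NULL, 1, GGML_DEFAULT_GRAPH_SIZE, false);

    // splits the graph, allocates the buffers and computes the result
    enum ggml_status status = ggml_backend_sched_graph_compute(sched, graph);
    GGML_ASSERT(status == GGML_STATUS_SUCCESS);

    ggml_backend_sched_free(sched);
    ggml_backend_free(backend_cpu);
}
#endif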
void ggml_backend_sched_free ( ggml_backend_sched_t sched ) {
if ( sched = = NULL ) {
return ;
}
2024-03-13 17:54:21 +00:00
for ( int b = 0 ; b < sched - > n_backends ; b + + ) {
for ( int c = 0 ; c < sched - > n_copies ; c + + ) {
ggml_backend_event_free ( sched - > events [ b ] [ c ] ) ;
}
}
2023-11-13 12:16:23 +00:00
ggml_gallocr_free ( sched - > galloc ) ;
2024-01-12 19:07:38 +00:00
ggml_free ( sched - > ctx ) ;
2024-07-27 02:41:55 +00:00
ggml_hash_set_free ( & sched - > hash_set ) ;
2024-03-18 10:03:04 +00:00
free ( sched - > splits ) ;
2024-07-27 02:41:55 +00:00
free ( sched - > hv_tensor_backend_ids ) ;
free ( sched - > hv_tensor_copies ) ;
2024-02-12 07:16:06 +00:00
free ( sched - > node_backend_ids ) ;
2024-03-13 17:54:21 +00:00
free ( sched - > leaf_backend_ids ) ;
2024-06-13 01:11:35 +00:00
free ( sched - > prev_node_backend_ids ) ;
free ( sched - > prev_leaf_backend_ids ) ;
2024-07-27 02:41:55 +00:00
free ( sched - > context_buffer ) ;
free ( sched - > graph . nodes ) ;
free ( sched - > graph . leafs ) ;
2023-11-13 12:16:23 +00:00
free ( sched ) ;
}
2024-02-12 07:16:06 +00:00
void ggml_backend_sched_reset ( ggml_backend_sched_t sched ) {
// reset state for the next run
2024-04-26 18:08:30 +00:00
if ( ! sched - > is_reset ) {
2024-07-27 02:41:55 +00:00
ggml_hash_set_reset ( & sched - > hash_set ) ;
memset ( sched - > hv_tensor_backend_ids , - 1 , sched - > hash_set . size * sizeof ( sched - > hv_tensor_backend_ids [ 0 ] ) ) ;
memset ( sched - > hv_tensor_copies , 0 , sched - > hash_set . size * sched - > n_backends * sched - > n_copies * sizeof ( struct ggml_tensor * ) ) ;
2024-04-26 18:08:30 +00:00
sched - > is_reset = true ;
}
2024-03-13 17:54:21 +00:00
sched - > is_alloc = false ;
2024-02-12 07:16:06 +00:00
}
2023-11-13 12:16:23 +00:00
2024-02-12 07:16:06 +00:00
bool ggml_backend_sched_reserve ( ggml_backend_sched_t sched , struct ggml_cgraph * measure_graph ) {
2024-07-27 02:41:55 +00:00
GGML_ASSERT ( ( int ) sched - > hash_set . size > = measure_graph - > n_nodes + measure_graph - > n_leafs ) ;
2024-03-18 10:03:04 +00:00
2024-02-12 07:16:06 +00:00
ggml_backend_sched_split_graph ( sched , measure_graph ) ;
2024-07-27 02:41:55 +00:00
if ( ! ggml_gallocr_reserve_n ( sched - > galloc , & sched - > graph , sched - > node_backend_ids , sched - > leaf_backend_ids ) ) {
2024-02-12 07:16:06 +00:00
return false ;
2023-11-13 12:16:23 +00:00
}
2024-02-12 07:16:06 +00:00
ggml_backend_sched_reset ( sched ) ;
2024-03-13 17:54:21 +00:00
ggml_backend_sched_synchronize ( sched ) ;
return true ;
}
bool ggml_backend_sched_alloc_graph ( ggml_backend_sched_t sched , struct ggml_cgraph * graph ) {
2024-07-27 02:41:55 +00:00
GGML_ASSERT ( ( int ) sched - > hash_set . size > = graph - > n_nodes + graph - > n_leafs ) ;
2024-03-13 17:54:21 +00:00
ggml_backend_sched_split_graph ( sched , graph ) ;
2024-07-27 02:41:55 +00:00
2024-03-13 17:54:21 +00:00
if ( ! ggml_backend_sched_alloc_splits ( sched ) ) {
return false ;
}
sched - > is_alloc = true ;
2024-02-12 07:16:06 +00:00
return true ;
2023-11-13 12:16:23 +00:00
}
2024-03-04 09:05:42 +00:00
enum ggml_status ggml_backend_sched_graph_compute ( ggml_backend_sched_t sched , struct ggml_cgraph * graph ) {
2024-03-13 17:54:21 +00:00
enum ggml_status err = ggml_backend_sched_graph_compute_async ( sched , graph ) ;
ggml_backend_sched_synchronize ( sched ) ;
return err ;
}
2024-01-12 19:07:38 +00:00
2024-03-13 17:54:21 +00:00
enum ggml_status ggml_backend_sched_graph_compute_async ( ggml_backend_sched_t sched , struct ggml_cgraph * graph ) {
if ( ! sched - > is_reset & & ! sched - > is_alloc ) {
2024-02-12 07:16:06 +00:00
ggml_backend_sched_reset ( sched ) ;
2024-01-12 19:07:38 +00:00
}
2023-11-13 12:16:23 +00:00
2024-03-13 17:54:21 +00:00
if ( ! sched - > is_alloc ) {
if ( ! ggml_backend_sched_alloc_graph ( sched , graph ) ) {
return GGML_STATUS_ALLOC_FAILED ;
}
2024-02-12 07:16:06 +00:00
}
2024-01-12 19:07:38 +00:00
2024-03-04 09:05:42 +00:00
return ggml_backend_sched_compute_splits ( sched ) ;
2024-02-12 07:16:06 +00:00
}
2024-01-17 16:39:41 +00:00
2024-03-13 17:54:21 +00:00
void ggml_backend_sched_synchronize ( ggml_backend_sched_t sched ) {
for ( int i = 0 ; i < sched - > n_backends ; i + + ) {
ggml_backend_synchronize ( sched - > backends [ i ] ) ;
}
}
2024-01-17 16:39:41 +00:00
void ggml_backend_sched_set_eval_callback ( ggml_backend_sched_t sched , ggml_backend_sched_eval_callback callback , void * user_data ) {
sched - > callback_eval = callback ;
sched - > callback_eval_user_data = user_data ;
}
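// sketch of a callback (illustrative; the tensor name is a made-up assumption): when ask == true the
// scheduler asks whether the user wants to observe this node; when ask == false the node has been
// computed and its data can be read; returning false from the second call stops evaluating the
// remaining nodes of the current split
#if 0
static bool example_eval_callback(struct ggml_tensor * t, bool ask, void * user_data) {
    (void) user_data;
    if (ask) {
        return strcmp(t->name, "result_output") == 0; // only observe this tensor
    }
    // t has been computed on its backend at this point, e.g. read it with ggml_backend_tensor_get
    return true; // continue with the rest of the split
}
#endif
// registered with: ggml_backend_sched_set_eval_callback(sched, example_eval_callback, NULL);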
2024-01-12 19:07:38 +00:00
int ggml_backend_sched_get_n_splits ( ggml_backend_sched_t sched ) {
return sched - > n_splits ;
}
2024-03-13 17:54:21 +00:00
int ggml_backend_sched_get_n_copies ( ggml_backend_sched_t sched ) {
return sched - > n_copies ;
}
2024-06-18 06:37:20 +00:00
int ggml_backend_sched_get_n_backends ( ggml_backend_sched_t sched ) {
return sched - > n_backends ;
}
ggml_backend_t ggml_backend_sched_get_backend ( ggml_backend_sched_t sched , int i ) {
GGML_ASSERT ( i > = 0 & & i < sched - > n_backends ) ;
return sched - > backends [ i ] ;
}
2024-02-12 07:16:06 +00:00
size_t ggml_backend_sched_get_buffer_size ( ggml_backend_sched_t sched , ggml_backend_t backend ) {
int backend_index = ggml_backend_sched_backend_id ( sched , backend ) ;
2024-01-12 19:07:38 +00:00
GGML_ASSERT ( backend_index > = 0 & & backend_index < sched - > n_backends ) ;
2024-03-13 17:54:21 +00:00
2024-02-12 07:16:06 +00:00
return ggml_gallocr_get_buffer_size ( sched - > galloc , backend_index ) ;
2023-11-13 12:16:23 +00:00
}
2024-03-13 17:54:21 +00:00
void ggml_backend_sched_set_tensor_backend ( ggml_backend_sched_t sched , struct ggml_tensor * node , ggml_backend_t backend ) {
2024-02-12 07:16:06 +00:00
int backend_index = ggml_backend_sched_backend_id ( sched , backend ) ;
2023-11-13 12:16:23 +00:00
GGML_ASSERT ( backend_index > = 0 & & backend_index < sched - > n_backends ) ;
2024-02-12 07:16:06 +00:00
tensor_backend_id ( node ) = backend_index ;
2024-06-13 01:11:35 +00:00
SET_CAUSE ( node , " usr " ) ;
2024-07-27 02:41:55 +00:00
sched - > is_reset = false ;
2023-11-13 12:16:23 +00:00
}
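// note (assumption about the intended usage): apply user assignments between ggml_backend_sched_reset and
// ggml_backend_sched_alloc_graph for the same graph, so that pass 1 of the graph split sees them and does
// not overwrite them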
2023-12-07 20:26:54 +00:00
2024-03-13 17:54:21 +00:00
ggml_backend_t ggml_backend_sched_get_tensor_backend ( ggml_backend_sched_t sched , struct ggml_tensor * node ) {
2024-02-12 07:16:06 +00:00
int backend_index = tensor_backend_id ( node ) ;
if ( backend_index = = - 1 ) {
2024-01-12 19:07:38 +00:00
return NULL ;
}
2024-02-12 07:16:06 +00:00
return sched - > backends [ backend_index ] ;
2024-01-12 19:07:38 +00:00
}
2023-12-07 20:26:54 +00:00
// utils
2024-01-12 19:07:38 +00:00
2024-06-03 17:03:26 +00:00
void ggml_backend_view_init ( struct ggml_tensor * tensor ) {
2023-12-07 20:26:54 +00:00
GGML_ASSERT ( tensor - > buffer = = NULL ) ;
GGML_ASSERT ( tensor - > view_src ! = NULL ) ;
GGML_ASSERT ( tensor - > view_src - > buffer ! = NULL ) ;
GGML_ASSERT ( tensor - > view_src - > data ! = NULL ) ;
2024-06-03 17:03:26 +00:00
tensor - > buffer = tensor - > view_src - > buffer ;
2023-12-07 20:26:54 +00:00
tensor - > data = ( char * ) tensor - > view_src - > data + tensor - > view_offs ;
2024-06-03 17:03:26 +00:00
ggml_backend_buffer_init_tensor ( tensor - > buffer , tensor ) ;
2023-12-07 20:26:54 +00:00
}
void ggml_backend_tensor_alloc ( ggml_backend_buffer_t buffer , struct ggml_tensor * tensor , void * addr ) {
GGML_ASSERT ( tensor - > buffer = = NULL ) ;
GGML_ASSERT ( tensor - > data = = NULL ) ;
GGML_ASSERT ( tensor - > view_src = = NULL ) ;
GGML_ASSERT ( addr > = ggml_backend_buffer_get_base ( buffer ) ) ;
GGML_ASSERT ( ( char * ) addr + ggml_backend_buffer_get_alloc_size ( buffer , tensor ) < =
( char * ) ggml_backend_buffer_get_base ( buffer ) + ggml_backend_buffer_get_size ( buffer ) ) ;
tensor - > buffer = buffer ;
tensor - > data = addr ;
ggml_backend_buffer_init_tensor ( buffer , tensor ) ;
}
2024-02-12 07:16:06 +00:00
static struct ggml_tensor * graph_copy_dup_tensor ( struct ggml_hash_set hash_set , struct ggml_tensor * * node_copies ,
2023-12-07 20:26:54 +00:00
struct ggml_context * ctx_allocated , struct ggml_context * ctx_unallocated , struct ggml_tensor * src ) {
GGML_ASSERT ( src ! = NULL ) ;
GGML_ASSERT ( src - > data & & " graph must be allocated " ) ;
2024-07-27 02:41:55 +00:00
size_t id = ggml_hash_insert ( & hash_set , src ) ;
if ( id = = GGML_HASHSET_ALREADY_EXISTS ) {
return node_copies [ ggml_hash_find ( & hash_set , src ) ] ;
2023-12-07 20:26:54 +00:00
}
struct ggml_tensor * dst = ggml_dup_tensor_layout ( src - > data & & ! src - > view_src ? ctx_allocated : ctx_unallocated , src ) ;
if ( src - > view_src ! = NULL ) {
2024-02-12 07:16:06 +00:00
dst - > view_src = graph_copy_dup_tensor ( hash_set , node_copies , ctx_allocated , ctx_unallocated , src - > view_src ) ;
2023-12-07 20:26:54 +00:00
dst - > view_offs = src - > view_offs ;
}
dst - > op = src - > op ;
memcpy ( dst - > op_params , src - > op_params , sizeof ( dst - > op_params ) ) ;
ggml_set_name ( dst , src - > name ) ;
// copy src
for ( int i = 0 ; i < GGML_MAX_SRC ; i + + ) {
struct ggml_tensor * s = src - > src [ i ] ;
if ( s = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2023-12-07 20:26:54 +00:00
}
2024-02-12 07:16:06 +00:00
dst - > src [ i ] = graph_copy_dup_tensor ( hash_set , node_copies , ctx_allocated , ctx_unallocated , s ) ;
2023-12-07 20:26:54 +00:00
}
node_copies [ id ] = dst ;
return dst ;
}
2024-07-27 02:41:55 +00:00
static void graph_copy_init_tensor ( struct ggml_hash_set * hash_set , struct ggml_tensor * * node_copies , bool * node_init , struct ggml_tensor * src ) {
2023-12-07 20:26:54 +00:00
size_t id = ggml_hash_find ( hash_set , src ) ;
if ( node_init [ id ] ) {
return ;
}
node_init [ id ] = true ;
struct ggml_tensor * dst = node_copies [ id ] ;
if ( dst - > view_src ! = NULL ) {
2024-02-12 07:16:06 +00:00
graph_copy_init_tensor ( hash_set , node_copies , node_init , src - > view_src ) ;
2024-06-03 17:03:26 +00:00
ggml_backend_view_init ( dst ) ;
2023-12-07 20:26:54 +00:00
}
else {
ggml_backend_tensor_copy ( src , dst ) ;
}
// init src
for ( int i = 0 ; i < GGML_MAX_SRC ; i + + ) {
struct ggml_tensor * s = src - > src [ i ] ;
if ( s = = NULL ) {
2024-02-17 21:04:16 +00:00
continue ;
2023-12-07 20:26:54 +00:00
}
2024-02-12 07:16:06 +00:00
graph_copy_init_tensor ( hash_set , node_copies , node_init , s ) ;
2023-12-07 20:26:54 +00:00
}
}
struct ggml_backend_graph_copy ggml_backend_graph_copy ( ggml_backend_t backend , struct ggml_cgraph * graph ) {
2024-07-27 02:41:55 +00:00
struct ggml_hash_set hash_set = ggml_hash_set_new ( graph - > visited_hash_set . size ) ;
2024-10-02 23:49:47 +00:00
struct ggml_tensor * * node_copies = ( ggml_tensor * * ) calloc ( hash_set . size , sizeof ( node_copies [ 0 ] ) ) ; // NOLINT
bool * node_init = ( bool * ) calloc ( hash_set . size , sizeof ( node_init [ 0 ] ) ) ;
2023-12-07 20:26:54 +00:00
struct ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead ( ) * hash_set . size + ggml_graph_overhead_custom ( graph - > size , false ) ,
/* .mem_buffer = */ NULL ,
/* .no_alloc = */ true
} ;
struct ggml_context * ctx_allocated = ggml_init ( params ) ;
struct ggml_context * ctx_unallocated = ggml_init ( params ) ;
2024-01-12 19:07:38 +00:00
if ( ctx_allocated = = NULL | | ctx_unallocated = = NULL ) {
2024-10-11 13:34:45 +00:00
GGML_LOG_ERROR ( " %s: failed to allocate context for graph copy \n " , __func__ ) ;
2024-07-27 02:41:55 +00:00
ggml_hash_set_free ( & hash_set ) ;
2024-01-12 19:07:38 +00:00
free ( node_copies ) ;
free ( node_init ) ;
ggml_free ( ctx_allocated ) ;
ggml_free ( ctx_unallocated ) ;
2024-10-02 23:49:47 +00:00
return {
2024-01-12 19:07:38 +00:00
/* .buffer = */ NULL ,
/* .ctx_allocated = */ NULL ,
/* .ctx_unallocated = */ NULL ,
/* .graph = */ NULL ,
} ;
}
2023-12-07 20:26:54 +00:00
// dup nodes
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
2024-02-12 07:16:06 +00:00
graph_copy_dup_tensor ( hash_set , node_copies , ctx_allocated , ctx_unallocated , node ) ;
2023-12-07 20:26:54 +00:00
}
// allocate nodes
ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors ( ctx_allocated , backend ) ;
2024-01-12 19:07:38 +00:00
if ( buffer = = NULL ) {
2024-10-11 13:34:45 +00:00
GGML_LOG_ERROR ( " %s: failed to allocate buffer for graph copy \n " , __func__ ) ;
2024-07-27 02:41:55 +00:00
ggml_hash_set_free ( & hash_set ) ;
2024-01-12 19:07:38 +00:00
free ( node_copies ) ;
free ( node_init ) ;
ggml_free ( ctx_allocated ) ;
ggml_free ( ctx_unallocated ) ;
2024-10-02 23:49:47 +00:00
return {
2024-01-12 19:07:38 +00:00
/* .buffer = */ NULL ,
/* .ctx_allocated = */ NULL ,
/* .ctx_unallocated = */ NULL ,
/* .graph = */ NULL ,
} ;
}
2023-12-07 20:26:54 +00:00
//printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);
// copy data and init views
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
2024-07-27 02:41:55 +00:00
graph_copy_init_tensor ( & hash_set , node_copies , node_init , node ) ;
2023-12-07 20:26:54 +00:00
}
// build graph copy
struct ggml_cgraph * graph_copy = ggml_new_graph_custom ( ctx_allocated , graph - > size , false ) ;
for ( int i = 0 ; i < graph - > n_nodes ; i + + ) {
struct ggml_tensor * node = graph - > nodes [ i ] ;
2024-07-27 02:41:55 +00:00
struct ggml_tensor * node_copy = node_copies [ ggml_hash_find ( & hash_set , node ) ] ;
2023-12-07 20:26:54 +00:00
graph_copy - > nodes [ i ] = node_copy ;
}
graph_copy - > n_nodes = graph - > n_nodes ;
2024-07-27 02:41:55 +00:00
ggml_hash_set_free ( & hash_set ) ;
2023-12-07 20:26:54 +00:00
free ( node_copies ) ;
free ( node_init ) ;
2024-10-02 23:49:47 +00:00
return {
2023-12-07 20:26:54 +00:00
/* .buffer = */ buffer ,
/* .ctx_allocated = */ ctx_allocated ,
/* .ctx_unallocated = */ ctx_unallocated ,
/* .graph = */ graph_copy ,
} ;
}
void ggml_backend_graph_copy_free ( struct ggml_backend_graph_copy copy ) {
ggml_backend_buffer_free ( copy . buffer ) ;
ggml_free ( copy . ctx_allocated ) ;
ggml_free ( copy . ctx_unallocated ) ;
}
2024-01-12 19:07:38 +00:00
bool ggml_backend_compare_graph_backend ( ggml_backend_t backend1 , ggml_backend_t backend2 , struct ggml_cgraph * graph , ggml_backend_eval_callback callback , void * user_data ) {
2023-12-07 20:26:54 +00:00
struct ggml_backend_graph_copy copy = ggml_backend_graph_copy ( backend2 , graph ) ;
2024-01-12 19:07:38 +00:00
if ( copy . buffer = = NULL ) {
return false ;
}
2023-12-07 20:26:54 +00:00
struct ggml_cgraph * g1 = graph ;
struct ggml_cgraph * g2 = copy . graph ;
assert ( g1 - > n_nodes = = g2 - > n_nodes ) ;
for ( int i = 0 ; i < g1 - > n_nodes ; i + + ) {
//printf("eval %d/%d\n", i, g1->n_nodes);
struct ggml_tensor * t1 = g1 - > nodes [ i ] ;
struct ggml_tensor * t2 = g2 - > nodes [ i ] ;
assert ( t1 - > op = = t2 - > op & & ggml_are_same_layout ( t1 , t2 ) ) ;
struct ggml_cgraph g1v = ggml_graph_view ( g1 , i , i + 1 ) ;
struct ggml_cgraph g2v = ggml_graph_view ( g2 , i , i + 1 ) ;
ggml_backend_graph_compute ( backend1 , & g1v ) ;
ggml_backend_graph_compute ( backend2 , & g2v ) ;
if ( ggml_is_view_op ( t1 - > op ) ) {
continue ;
}
// compare results, calculate rms etc
if ( ! callback ( i , t1 , t2 , user_data ) ) {
break ;
}
}
ggml_backend_graph_copy_free ( copy ) ;
2024-01-12 19:07:38 +00:00
return true ;
2023-12-07 20:26:54 +00:00
}
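// sketch of a comparison callback for ggml_backend_compare_graph_backend (illustrative; the names are
// assumptions, not part of the public headers):
#if 0
static bool example_compare_callback(int index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data) {
    (void) index;
    (void) user_data;
    // t1 was computed by backend1 and t2 by backend2; they have the same layout (asserted above)
    std::vector<uint8_t> d1(ggml_nbytes(t1));
    std::vector<uint8_t> d2(ggml_nbytes(t2));
    ggml_backend_tensor_get(t1, d1.data(), 0, d1.size());
    ggml_backend_tensor_get(t2, d2.data(), 0, d2.size());
    // ... interpret the raw data according to t1->type and compute an error metric (e.g. max abs diff) ...
    return true; // return false to stop comparing the remaining nodes
}
#endif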