diff --git a/ggml/src/ggml-cann/kernels/CMakeLists.txt b/ggml/src/ggml-cann/kernels/CMakeLists.txt
index 5b4fef91b..6b3c1a4f4 100644
--- a/ggml/src/ggml-cann/kernels/CMakeLists.txt
+++ b/ggml/src/ggml-cann/kernels/CMakeLists.txt
@@ -3,8 +3,7 @@ if (NOT SOC_TYPE)
 endif()
 
 file(GLOB SRC_FILES
-    get_row_f32.cpp
-    get_row_f16.cpp
+    get_row_float.cpp
     get_row_q4_0.cpp
     get_row_q8_0.cpp
     quantize_f32_q8_0.cpp
diff --git a/ggml/src/ggml-cann/kernels/get_row_f32.cpp b/ggml/src/ggml-cann/kernels/get_row_f32.cpp
deleted file mode 100644
index 9db080af3..000000000
--- a/ggml/src/ggml-cann/kernels/get_row_f32.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-#include "kernel_operator.h"
-
-// optimize me. Use template to avoid copy code.
-using namespace AscendC;
-
-#define BUFFER_NUM 2
-
-class GET_ROW_F32 {
-   public:
-    __aicore__ inline GET_ROW_F32() {}
-    __aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
-                                int64_t *input_ne_ub, size_t *input_nb_ub,
-                                int64_t *indices_ne_ub, size_t *indices_nb_ub,
-                                int64_t *output_ne_ub, size_t *output_nb_ub) {
-        int64_t op_block_num = GetBlockNum();
-        int64_t op_block_idx = GetBlockIdx();
-
-        for (int i = 0; i < 4; i++) {
-            input_ne[i] = input_ne_ub[i];
-            input_stride[i] = input_nb_ub[i] / input_nb_ub[0];
-
-            indices_ne[i] = indices_ne_ub[i];
-            indices_stride[i] = indices_nb_ub[i] / indices_nb_ub[0];
-
-            output_ne[i] = output_ne_ub[i];
-            output_stride[i] = output_nb_ub[i] / output_nb_ub[0];
-        }
-
-        // Indices has two dims. n_elements = all rows should get.
-        // dr, all rows should this thread get.
-        uint64_t n_elements =
-            indices_ne[0] * indices_ne[1] * indices_ne[2] * indices_ne[3];
-        dr = n_elements / op_block_num;
-
-        uint64_t tails = n_elements % op_block_num;
-        if (op_block_idx < tails) {
-            dr += 1;
-            ir = dr * op_block_idx;
-        } else {
-            ir = dr * op_block_idx + tails;
-        }
-
-        input_gm.SetGlobalBuffer((__gm__ float *)input);
-        indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
-        output_gm.SetGlobalBuffer((__gm__ float *)output);
-
-        uint64_t local_buffer_size = ((input_ne[0] * sizeof(float) + 31) & ~31);
-        local_buffer_elems = local_buffer_size / sizeof(float);
-
-        // TODO, consider long row that can't put in UB.
-        // All data should asign to 32. It's ok because all data is align to 32.
-        pipe.InitBuffer(input_queue, BUFFER_NUM, local_buffer_size);
-        pipe.InitBuffer(output_queue, BUFFER_NUM, local_buffer_size);
-    }
-
-    __aicore__ inline void copy_in(uint32_t offset, size_t len) {
-        LocalTensor<float> input_local = input_queue.AllocTensor<float>();
-        size_t tail = len % 32;
-        len = len & ~31;
-        DataCopy(input_local, input_gm[offset], len);
-        if(tail != 0) {
-            DataCopyExtParams dataCopyParams;
-            dataCopyParams.blockCount = 1;
-            dataCopyParams.blockLen = tail * sizeof(float);
-            DataCopyPadExtParams<float> padParams;
-            DataCopyPad(input_local[len], input_gm[offset + len],
-                        dataCopyParams, padParams);
-        }
-        input_queue.EnQue(input_local);
-    }
-
-    __aicore__ inline void copy_out(uint32_t offset, size_t len) {
-        LocalTensor<float> output_local = output_queue.DeQue<float>();
-        size_t tail = len % 32;
-        len = len & ~31;
-        DataCopy(output_gm[offset], output_local, len);
-        if(tail != 0) {
-            DataCopyExtParams dataCopyParams;
-            dataCopyParams.blockCount = 1;
-            dataCopyParams.blockLen = tail * sizeof(float);
-            DataCopyPad(output_gm[offset + len], output_local[len],
-                        dataCopyParams);
-        }
-        output_queue.FreeTensor(output_local);
-    }
-
-    __aicore__ inline void calculate_row(int64_t idx) {
-        const int64_t indices_ne2_idx = idx / (indices_ne[0] * indices_ne[1]);
-        const int64_t indices_ne1_idx =
-            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1]) /
-            indices_ne[0];
-        const int64_t indices_ne0_idx =
-            (idx - indices_ne2_idx * indices_ne[0] * indices_ne[1] -
-             indices_ne1_idx * indices_ne[0]);
-
-        const int64_t indices_offset = indices_ne0_idx * indices_stride[0] +
-                                       indices_ne1_idx * indices_stride[1] +
-                                       indices_ne2_idx * indices_stride[2];
-        const int32_t selected_row_idx = indices_gm.GetValue(indices_offset);
-
-        const int64_t input_offset = selected_row_idx * input_stride[1] +
-                                     indices_ne1_idx * input_stride[2] +
-                                     indices_ne2_idx * input_stride[3];
-
-        const int64_t output_offset = indices_ne0_idx * output_stride[1] +
-                                      indices_ne1_idx * output_stride[2] +
-                                      indices_ne2_idx * output_stride[3];
-
-        copy_in(input_offset, input_ne[0]);
-        LocalTensor<float> input_local = input_queue.DeQue<float>();
-        LocalTensor<float> output_local = output_queue.AllocTensor<float>();
-
-        DataCopy(output_local, input_local, local_buffer_elems);
-        output_queue.EnQue(output_local);
-        copy_out(output_offset, input_ne[0]);
-
-        input_queue.FreeTensor(input_local);
-    }
-
-    __aicore__ inline void calculate() {
-        for (int64_t i = ir; i < ir + dr; i++) {
-            calculate_row(i);
-        }
-    }
-
-   private:
-    int64_t input_ne[4];
-    size_t input_stride[4];
-
-    int64_t indices_ne[4];
-    size_t indices_stride[4];
-
-    int64_t output_ne[4];
-    size_t output_stride[4];
-
-    size_t local_buffer_elems;
-
-    int64_t ir;
-    int64_t dr;
-
-    TPipe pipe;
-    GlobalTensor<float> input_gm;
-    GlobalTensor<int32_t> indices_gm;
-    GlobalTensor<float> output_gm;
-    TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
-    TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
-};
-
-template <typename T>
-__aicore__ inline void copy_to_ub(GM_ADDR gm, T *ub, size_t size) {
-    auto gm_ptr = (__gm__ uint8_t *)gm;
-    auto ub_ptr = (uint8_t *)(ub);
-    for (int32_t i = 0; i < size; ++i, ++ub_ptr, ++gm_ptr) {
-        *ub_ptr = *gm_ptr;
-    }
-}
-
-extern "C" __global__ __aicore__ void ascendc_get_row_f32(
-    GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
-    GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
-    GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
-    int64_t input_ne_ub[4];
-    size_t input_nb_ub[4];
-    int64_t indices_ne_ub[4];
-    size_t indices_nb_ub[4];
-    int64_t output_ne_ub[4];
-    size_t output_nb_ub[4];
-
-    copy_to_ub(input_ne_gm, input_ne_ub, 32);
-    copy_to_ub(input_nb_gm, input_nb_ub, 32);
-    copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
-    copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
-    copy_to_ub(output_ne_gm, output_ne_ub, 32);
-    copy_to_ub(output_nb_gm, output_nb_ub, 32);
-
-    GET_ROW_F32 op;
-    op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
-            indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
-    op.calculate();
-}
diff --git a/ggml/src/ggml-cann/kernels/get_row_f16.cpp b/ggml/src/ggml-cann/kernels/get_row_float.cpp
similarity index 73%
rename from ggml/src/ggml-cann/kernels/get_row_f16.cpp
rename to ggml/src/ggml-cann/kernels/get_row_float.cpp
index c704b5b2e..16dc5cb87 100644
--- a/ggml/src/ggml-cann/kernels/get_row_f16.cpp
+++ b/ggml/src/ggml-cann/kernels/get_row_float.cpp
@@ -5,14 +5,14 @@ using namespace AscendC;
 
 #define BUFFER_NUM 2
 
-class GET_ROW_F16 {
-   public:
-    __aicore__ inline GET_ROW_F16() {}
+template <typename ROW_TYPE>
+class GET_ROW_FLOAT {
+public:
+    __aicore__ inline GET_ROW_FLOAT() {}
     __aicore__ inline void init(GM_ADDR input, GM_ADDR indices, GM_ADDR output,
                                 int64_t *input_ne_ub, size_t *input_nb_ub,
                                 int64_t *indices_ne_ub, size_t *indices_nb_ub,
                                 int64_t *output_ne_ub, size_t *output_nb_ub) {
-        // TODO, use template for F16/f32
         int64_t op_block_num = GetBlockNum();
         int64_t op_block_idx = GetBlockIdx();
 
@@ -41,16 +41,18 @@ class GET_ROW_F16 {
             ir = dr * op_block_idx + tails;
         }
 
-        input_gm.SetGlobalBuffer((__gm__ half *)input);
+        input_gm.SetGlobalBuffer((__gm__ ROW_TYPE *)input);
         indices_gm.SetGlobalBuffer((__gm__ int32_t *)indices);
         output_gm.SetGlobalBuffer((__gm__ float *)output);
 
-        uint64_t input_local_buffer_size = ((input_ne[0] * sizeof(half) + 31)
-                                            & ~31);
-        uint64_t output_local_buffer_size = ((input_ne[0] * sizeof(float) + 31)
-                                             & ~31);
-
-        local_buffer_elems = input_local_buffer_size / sizeof(half);
+        uint64_t input_local_buffer_size = ((input_ne[0] * sizeof(ROW_TYPE) + 31) & ~31);
+        uint64_t output_local_buffer_size = 0;
+        if constexpr (std::is_same<ROW_TYPE, half>::value) {
+            output_local_buffer_size = ((input_ne[0] * sizeof(float) + 31) & ~31);
+        } else if constexpr (std::is_same<ROW_TYPE, float>::value) {
+            output_local_buffer_size = input_local_buffer_size;
+        }
+        local_buffer_elems = input_local_buffer_size / sizeof(ROW_TYPE);
 
         // TODO, consider long row that can't put in UB.
         // All data should asign to 32. It's ok because all data is align to 32.
@@ -59,15 +61,15 @@ class GET_ROW_F16 {
     }
 
     __aicore__ inline void copy_in(uint32_t offset, size_t len) {
-        LocalTensor<half> input_local = input_queue.AllocTensor<half>();
+        LocalTensor<ROW_TYPE> input_local = input_queue.AllocTensor<ROW_TYPE>();
         size_t tail = len % 32;
         len = len & ~31;
        DataCopy(input_local, input_gm[offset], len);
         if(tail != 0) {
             DataCopyExtParams dataCopyParams;
             dataCopyParams.blockCount = 1;
-            dataCopyParams.blockLen = tail * sizeof(half);
-            DataCopyPadExtParams<half> padParams;
+            dataCopyParams.blockLen = tail * sizeof(ROW_TYPE);
+            DataCopyPadExtParams<ROW_TYPE> padParams;
             DataCopyPad(input_local[len], input_gm[offset + len],
                         dataCopyParams, padParams);
         }
@@ -112,11 +114,16 @@ class GET_ROW_F16 {
                                       indices_ne2_idx * output_stride[3];
 
         copy_in(input_offset, input_ne[0]);
-        LocalTensor<half> input_local = input_queue.DeQue<half>();
+        LocalTensor<ROW_TYPE> input_local = input_queue.DeQue<ROW_TYPE>();
         LocalTensor<float> output_local = output_queue.AllocTensor<float>();
 
-        Cast(output_local, input_local, RoundMode::CAST_NONE,
-             local_buffer_elems);
+        if constexpr (std::is_same<ROW_TYPE, half>::value) {
+            Cast(output_local, input_local, RoundMode::CAST_NONE,
+                 local_buffer_elems);
+        } else if constexpr (std::is_same<ROW_TYPE, float>::value) {
+            DataCopy(output_local, input_local, local_buffer_elems);
+        }
+
         output_queue.EnQue(output_local);
         copy_out(output_offset, input_ne[0]);
 
@@ -145,7 +152,7 @@ class GET_ROW_F16 {
     int64_t dr;
     TPipe pipe;
-    GlobalTensor<half> input_gm;
+    GlobalTensor<ROW_TYPE> input_gm;
     GlobalTensor<int32_t> indices_gm;
     GlobalTensor<float> output_gm;
     TQue<QuePosition::VECIN, BUFFER_NUM> input_queue;
     TQue<QuePosition::VECOUT, BUFFER_NUM> output_queue;
@@ -179,7 +186,31 @@ extern "C" __global__ __aicore__ void ascendc_get_row_f16(
     copy_to_ub(output_ne_gm, output_ne_ub, 32);
     copy_to_ub(output_nb_gm, output_nb_ub, 32);
 
-    GET_ROW_F16 op;
+    GET_ROW_FLOAT<half> op;
+    op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
+            indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
+    op.calculate();
+}
+
+extern "C" __global__ __aicore__ void ascendc_get_row_f32(
+    GM_ADDR input_gm, GM_ADDR indices_gm, GM_ADDR output_gm,
+    GM_ADDR input_ne_gm, GM_ADDR input_nb_gm, GM_ADDR indices_ne_gm,
+    GM_ADDR indices_nb_gm, GM_ADDR output_ne_gm, GM_ADDR output_nb_gm) {
+    int64_t input_ne_ub[4];
+    size_t input_nb_ub[4];
+    int64_t indices_ne_ub[4];
+    size_t indices_nb_ub[4];
+    int64_t output_ne_ub[4];
+    size_t output_nb_ub[4];
+
+    copy_to_ub(input_ne_gm, input_ne_ub, 32);
+    copy_to_ub(input_nb_gm, input_nb_ub, 32);
+    copy_to_ub(indices_ne_gm, indices_ne_ub, 32);
+    copy_to_ub(indices_nb_gm, indices_nb_ub, 32);
+    copy_to_ub(output_ne_gm, output_ne_ub, 32);
+    copy_to_ub(output_nb_gm, output_nb_ub, 32);
+
+    GET_ROW_FLOAT<float> op;
     op.init(input_gm, indices_gm, output_gm, input_ne_ub, input_nb_ub,
             indices_ne_ub, indices_nb_ub, output_ne_ub, output_nb_ub);
     op.calculate();
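
Note (illustration, not part of the patch): the refactor collapses the duplicated f16/f32 kernels into one GET_ROW_FLOAT<ROW_TYPE> because the only type-dependent step is how a row reaches the f32 output: an f16 row goes through Cast, an f32 row through DataCopy. The sketch below mirrors that `if constexpr` dispatch in plain host C++ so it compiles anywhere; `Half`, `cast_to_f32`, and `emit_row` are hypothetical stand-ins, not AscendC APIs.

    #include <cstdio>
    #include <type_traits>

    // Hypothetical stand-in for the device-side 16-bit float type.
    struct Half { unsigned short bits; };

    // Placeholder analogue of the device Cast call.
    static float cast_to_f32(Half) { return 0.0f; }

    template <typename ROW_TYPE>
    void emit_row(const ROW_TYPE *src, float *dst, size_t n) {
        if constexpr (std::is_same<ROW_TYPE, Half>::value) {
            // f16 input: widen each element to f32 (device code uses Cast).
            for (size_t i = 0; i < n; i++) dst[i] = cast_to_f32(src[i]);
        } else if constexpr (std::is_same<ROW_TYPE, float>::value) {
            // f32 input: straight copy (device code uses DataCopy).
            for (size_t i = 0; i < n; i++) dst[i] = src[i];
        }
    }

    int main() {
        float in_f32[4] = {1, 2, 3, 4}, out[4];
        emit_row<float>(in_f32, out, 4);  // instantiates only the copy branch
        printf("%g\n", out[3]);           // prints 4
    }

Because `if constexpr` discards the untaken branch at instantiation time, the f32 instantiation never even type-checks the Cast path, which is what lets the two entry points share one class.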
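The dr/ir computation in init() (unchanged by this patch) splits n_elements rows across op_block_num cores so that the first n_elements % op_block_num cores each take one extra row. A minimal host-side sketch of the same arithmetic, assuming positive inputs; `partition` is a hypothetical name:

    #include <cassert>
    #include <cstdint>

    // Same partitioning arithmetic as GET_ROW_FLOAT::init():
    // block `idx` processes rows [ir, ir + dr).
    static void partition(uint64_t n_elements, int64_t blocks, int64_t idx,
                          int64_t &ir, int64_t &dr) {
        dr = n_elements / blocks;
        uint64_t tails = n_elements % blocks;
        if (idx < (int64_t)tails) {
            dr += 1;             // first `tails` blocks take one extra row
            ir = dr * idx;
        } else {
            ir = dr * idx + tails;
        }
    }

    int main() {
        // 10 rows over 4 blocks -> dr of 3,3,2,2 covering [0,10) with no gaps.
        int64_t ir = 0, dr = 0, next = 0;
        for (int64_t b = 0; b < 4; b++) {
            partition(10, 4, b, ir, dr);
            assert(ir == next);
            next = ir + dr;
        }
        assert(next == 10);
    }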
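Likewise, copy_in()/copy_out() move each row as a 32-element-aligned body (DataCopy) plus a remainder handled with DataCopyPad. The split itself is just the following, shown host-side; `split_row` is a hypothetical name:

    #include <cstddef>
    #include <cstdio>

    // Mirror of the copy_in() split: an aligned body (DataCopy on device)
    // plus a remainder moved as a padded tail (DataCopyPad on device).
    static void split_row(size_t len, size_t &body, size_t &tail) {
        tail = len % 32;
        body = len & ~(size_t)31;
    }

    int main() {
        size_t body, tail;
        split_row(100, body, tail);
        printf("body=%zu tail=%zu\n", body, tail);  // body=96 tail=4
    }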