Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-27 20:04:35 +00:00
5dc9dd7152
* Add Command R Plus GGUF
* Add Command R Plus GGUF
* Loading works up to LayerNorm2D
* Export new tensors in 1D so they are not quantized.
* Fix embedding layer based on Noeda's example
* Whitespace
* Add line
* Fix unexpected tokens on MPS. Re-add F16 fix. (Noeda)
* dranger003: Fix block index overflow in CUDA dequantizing.
* Reverted blocked multiplication code as it still has issues and could affect other Llama arches
* export norms as f32
* fix overflow issues during quant and other cleanup
* Type convention

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

* dranger003: Fix more int overflow during quant.

---------

Co-authored-by: S <seast@Ss-Mac-Studio.local>
Co-authored-by: S <s@example.com>
Co-authored-by: slaren <slarengh@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
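The block-index and int-overflow items above come down to widening the index arithmetic in the CUDA dequantize path to 64 bits: once a tensor holds more than about 2^31 elements, a 32-bit global index computed from blockIdx.x and blockDim.x wraps around. The following is a minimal sketch of that idea under invented names, not the repository's actual kernel; it only mirrors the int64_t element count and 256-thread block size visible in the header below.

    #include <cuda_fp16.h>
    #include <cstdint>

    // Hypothetical fp16 -> fp32 converter: k and the per-thread global index are
    // 64-bit so tensors with more than INT32_MAX elements are addressed without wrapping.
    static __global__ void convert_fp16_to_fp32(const half * __restrict__ x, float * __restrict__ y, const int64_t k) {
        const int64_t i = (int64_t) blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= k) {
            return;
        }
        y[i] = __half2float(x[i]);
    }

    // Hypothetical launcher using a 256-thread block, the same block size the header
    // below fixes as CUDA_DEQUANTIZE_BLOCK_SIZE; its signature matches to_t_cuda_t<float>.
    static void convert_fp16_to_fp32_cuda(const void * __restrict__ x, float * __restrict__ y, const int64_t k, cudaStream_t stream) {
        const int64_t num_blocks = (k + 256 - 1) / 256;
        convert_fp16_to_fp32<<<num_blocks, 256, 0, stream>>>((const half *) x, y, k);
    }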
14 lines
391 B
Plaintext
#include "common.cuh"

#define CUDA_DEQUANTIZE_BLOCK_SIZE 256

template<typename T>
using to_t_cuda_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, cudaStream_t stream);

typedef to_t_cuda_t<float> to_fp32_cuda_t;
typedef to_t_cuda_t<half> to_fp16_cuda_t;

to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type);

to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type);
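To show how these declarations fit together, here is a hedged usage sketch: the wrapper name is invented, and the assumption that the lookup returns nullptr for types without a converter is mine, not something the header states.

    // Hypothetical caller, assuming this header (and the ggml headers it pulls in
    // via common.cuh) is included. src_q: device buffer holding n_elems values of
    // quantization type `type`; dst_f32: device buffer with room for n_elems floats.
    static bool dequantize_to_fp32(const void * src_q, float * dst_f32,
                                   const int64_t n_elems, const ggml_type type, cudaStream_t stream) {
        const to_fp32_cuda_t to_fp32 = ggml_get_to_fp32_cuda(type);
        if (to_fp32 == nullptr) {
            return false; // assumed behavior: no converter registered for this type
        }
        to_fp32(src_q, dst_f32, n_elems, stream); // runs asynchronously on `stream`
        return true;
    }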