ggml : refactor online repacking (#10446)

* rename ggml-cpu-aarch64.c to .cpp

* reformat extra cpu backend.
  - clean Q4_0_N_M and IQ4_0_N_M
  - remove from "file" tensor type
  - allow only with dynamic repack
  - extract cpu extra bufts and convert to C++
    - hbm
    - "aarch64"
  - more generic use of extra buffer
  - generalise extra_supports_op
  - new API for "cpu-accel":
    - amx
    - aarch64

* clang-format

* Clean Q4_0_N_M ref
  Enable restrict on C++

* add op GGML_OP_MUL_MAT_ID for Q4_0_N_M with runtime repack

* added/corrected control on tensor size for Q4 repacking.

* Update ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

* Update ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>

* add debug logs on repacks.

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
parent c2a16c0bdb
commit 19d8762ab6

 Makefile | 22
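For orientation before the diff: the runtime-repack path this commit introduces hangs together as follows. A weight tensor is placed in an "extra" CPU buffer type; `init_tensor` attaches a `tensor_traits` object to `tensor->extra`, `set_tensor` repacks the incoming row-major quants into an interleaved layout, and at graph time the traits object takes over the matmul. Below is a minimal sketch of that shape, distilled from the AMX hunks further down; the empty bodies are illustrative only, while `ggml::cpu::tensor_traits` and its two virtual methods come from the new internal header `ggml-cpu-traits.h` added by this commit.

```cpp
// Sketch only - compiles against the internal CPU headers this commit adds,
// not a standalone program. Mirrors the structure of ggml-cpu/amx/amx.cpp.
namespace ggml::cpu::sketch {

class tensor_traits : public ggml::cpu::tensor_traits {
    bool work_size(int /*n_threads*/, const struct ggml_tensor * op, size_t & size) override {
        size = 0;      // report any scratch space the repacked kernel needs
        return true;
    }

    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
        (void) params; // sketch: unused here
        if (op->op == GGML_OP_MUL_MAT) {
            // run the kernel that understands the interleaved (repacked) layout
            return true;
        }
        return false;  // everything else falls back to the generic CPU path
    }
};

}  // namespace ggml::cpu::sketch
```

The matching `extra_buffer_type` advertises which ops it can take over through `supports_op`, so the scheduler only routes eligible matmuls to the repacked path.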
--- a/Makefile
+++ b/Makefile
@@ -445,6 +445,10 @@ ifeq ($(UNAME_M),$(filter $(UNAME_M),x86_64 i686 amd64))
 MK_CFLAGS     += -march=native -mtune=native
 HOST_CXXFLAGS += -march=native -mtune=native
 
+# Usage AMX build test
+#MK_CFLAGS     += -march=graniterapids -mtune=graniterapids
+#HOST_CXXFLAGS += -march=graniterapids -mtune=graniterapids
+
 # Usage AVX-only
 #MK_CFLAGS   += -mfma -mf16c -mavx
 #MK_CXXFLAGS += -mfma -mf16c -mavx
@@ -948,7 +952,6 @@ DIR_COMMON = common
 
 OBJ_GGML = \
 	$(DIR_GGML)/src/ggml.o \
-	$(DIR_GGML)/src/ggml-aarch64.o \
 	$(DIR_GGML)/src/ggml-alloc.o \
 	$(DIR_GGML)/src/ggml-backend.o \
 	$(DIR_GGML)/src/ggml-backend-reg.o \
@@ -956,9 +959,11 @@ OBJ_GGML = \
 	$(DIR_GGML)/src/ggml-quants.o \
 	$(DIR_GGML)/src/ggml-threading.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu.o \
-	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-cpp.o \
+	$(DIR_GGML)/src/ggml-cpu/ggml-cpu_cpp.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-aarch64.o \
+	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-hbm.o \
 	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-quants.o \
+	$(DIR_GGML)/src/ggml-cpu/ggml-cpu-traits.o \
 	$(OBJ_GGML_EXT)
 
 OBJ_LLAMA = \
@@ -1098,17 +1103,10 @@ DEP_FILES = $(OBJ_GGML:.o=.d) $(OBJ_LLAMA:.o=.d) $(OBJ_COMMON:.o=.d)
 # Default target
 all: $(BUILD_TARGETS)
 
+# force c++ build for source file that have same name as c file
 # Note: need this exception because `ggml-cpu.c` and `ggml-cpu.cpp` both produce the same obj/dep files
-# g++ -M -I ./ggml/include/ -I ./ggml/src ggml/src/ggml-cpu/ggml-cpu.cpp | grep ggml
-$(DIR_GGML)/src/ggml-cpu/ggml-cpu-cpp.o: \
-	ggml/src/ggml-cpu/ggml-cpu.cpp \
-	ggml/include/ggml-backend.h \
-	ggml/include/ggml.h \
-	ggml/include/ggml-alloc.h \
-	ggml/src/ggml-backend-impl.h \
-	ggml/include/ggml-cpu.h \
-	ggml/src/ggml-impl.h
-	$(CXX) $(CXXFLAGS) -c $< -o $@
+$(DIR_GGML)/%_cpp.o: $(DIR_GGML)/%.cpp
+	$(CXX) $(CXXFLAGS) -MMD -c $< -o $@
 
 # Rules for building object files
 $(DIR_GGML)/%.o: $(DIR_GGML)/%.c
--- a/Package.swift
+++ b/Package.swift
@@ -10,14 +10,15 @@ var sources = [
     "src/unicode.cpp",
     "src/unicode-data.cpp",
     "ggml/src/ggml.c",
-    "ggml/src/ggml-aarch64.c",
     "ggml/src/ggml-alloc.c",
     "ggml/src/ggml-backend.cpp",
    "ggml/src/ggml-backend-reg.cpp",
     "ggml/src/ggml-cpu/ggml-cpu.c",
     "ggml/src/ggml-cpu/ggml-cpu.cpp",
-    "ggml/src/ggml-cpu/ggml-cpu-aarch64.c",
+    "ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp",
+    "ggml/src/ggml-cpu/ggml-cpu-hbm.cpp",
     "ggml/src/ggml-cpu/ggml-cpu-quants.c",
+    "ggml/src/ggml-cpu/ggml-cpu-traits.cpp",
     "ggml/src/ggml-threading.cpp",
     "ggml/src/ggml-quants.c",
 ]
--- a/docs/build.md
+++ b/docs/build.md
@@ -55,7 +55,7 @@ cmake --build build --config Release
 cmake --preset arm64-windows-llvm-release -D GGML_OPENMP=OFF
 cmake --build build-arm64-windows-llvm-release
 ```
-Building for arm64 can also be done with the MSVC compiler with the build-arm64-windows-MSVC preset, or the standard CMake build instructions. However, note that the MSVC compiler does not support inline ARM assembly code, used e.g. for the accelerated Q4_0_4_8 CPU kernels.
+Building for arm64 can also be done with the MSVC compiler with the build-arm64-windows-MSVC preset, or the standard CMake build instructions. However, note that the MSVC compiler does not support inline ARM assembly code, used e.g. for the accelerated Q4_0_N_M CPU kernels.
 
 ## BLAS Build
 
--- a/examples/quantize/README.md
+++ b/examples/quantize/README.md
@@ -54,8 +54,6 @@ As the models are currently fully loaded into memory, you will need adequate dis
 
 Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
 
-The quantization formats `Q4_0_4_4`, `Q4_0_4_8` and `Q4_0_8_8` are block interleaved variants of the `Q4_0` format, providing a data layout that is better suited for specific implementations of optimized mulmat kernels. Since these formats differ only in data layout, they have the same quantized size as the `Q4_0` format.
-
 *(outdated)*
 
 | Model | Measure | F16 | Q4_0 | Q4_1 | Q5_0 | Q5_1 | Q8_0 |
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -48,9 +48,6 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "Q5_K_M",   LLAMA_FTYPE_MOSTLY_Q5_K_M,   " 5.33G, +0.0569 ppl @ Llama-3-8B",  },
     { "Q6_K",     LLAMA_FTYPE_MOSTLY_Q6_K,     " 6.14G, +0.0217 ppl @ Llama-3-8B",  },
     { "Q8_0",     LLAMA_FTYPE_MOSTLY_Q8_0,     " 7.96G, +0.0026 ppl @ Llama-3-8B",  },
-    { "Q4_0_4_4", LLAMA_FTYPE_MOSTLY_Q4_0_4_4, " 4.34G, +0.4685 ppl @ Llama-3-8B",  },
-    { "Q4_0_4_8", LLAMA_FTYPE_MOSTLY_Q4_0_4_8, " 4.34G, +0.4685 ppl @ Llama-3-8B",  },
-    { "Q4_0_8_8", LLAMA_FTYPE_MOSTLY_Q4_0_8_8, " 4.34G, +0.4685 ppl @ Llama-3-8B",  },
     { "F16",      LLAMA_FTYPE_MOSTLY_F16,      "14.00G, +0.0020 ppl @ Mistral-7B",  },
     { "BF16",     LLAMA_FTYPE_MOSTLY_BF16,     "14.00G, -0.0050 ppl @ Mistral-7B",  },
     { "F32",      LLAMA_FTYPE_ALL_F32,         "26.00G              @ 7B",          },
--- a/ggml/include/ggml-cpu.h
+++ b/ggml/include/ggml-cpu.h
@@ -103,24 +103,14 @@ extern "C" {
 
     // Internal types and functions exposed for tests and benchmarks
 
-    typedef void (*ggml_from_float_to_mat_t)
-                                   (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nr, int64_t k, int64_t bs);
     typedef void (*ggml_vec_dot_t) (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x, size_t bx,
                                     const void * GGML_RESTRICT y, size_t by, int nrc);
-    typedef void (*ggml_gemv_t)    (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x,
-                                    const void * GGML_RESTRICT y, int nr, int nc);
-    typedef void (*ggml_gemm_t)    (int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT x,
-                                    const void * GGML_RESTRICT y, int nr, int nc);
 
     struct ggml_type_traits_cpu {
         ggml_from_float_t        from_float;
-        ggml_from_float_to_mat_t from_float_to_mat;
         ggml_vec_dot_t           vec_dot;
         enum ggml_type           vec_dot_type;
         int64_t                  nrows; // number of rows to process simultaneously
-        int64_t                  ncols; // number of columns to process simultaneously
-        ggml_gemv_t              gemv;
-        ggml_gemm_t              gemm;
     };
 
     GGML_BACKEND_API const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type);
@@ -140,13 +130,6 @@ extern "C" {
 
     GGML_BACKEND_API ggml_backend_reg_t ggml_backend_cpu_reg(void);
 
-#ifdef GGML_USE_CPU_HBM
-    GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
-#endif
-
-    GGML_BACKEND_API ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void);
-    GGML_BACKEND_API bool ggml_backend_cpu_buft_is_aarch64(ggml_backend_buffer_type_t buft);
-
 #ifdef __cplusplus
 }
 #endif
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -384,15 +384,15 @@ extern "C" {
         GGML_TYPE_F64     = 28,
         GGML_TYPE_IQ1_M   = 29,
         GGML_TYPE_BF16    = 30,
-        GGML_TYPE_Q4_0_4_4 = 31,
-        GGML_TYPE_Q4_0_4_8 = 32,
-        GGML_TYPE_Q4_0_8_8 = 33,
+        // GGML_TYPE_Q4_0_4_4 = 31, support has been removed from gguf files
+        // GGML_TYPE_Q4_0_4_8 = 32,
+        // GGML_TYPE_Q4_0_8_8 = 33,
         GGML_TYPE_TQ1_0   = 34,
         GGML_TYPE_TQ2_0   = 35,
-        GGML_TYPE_IQ4_NL_4_4 = 36,
+        // GGML_TYPE_IQ4_NL_4_4 = 36,
         // GGML_TYPE_IQ4_NL_4_8 = 37,
         // GGML_TYPE_IQ4_NL_8_8 = 38,
-        GGML_TYPE_COUNT,
+        GGML_TYPE_COUNT   = 39,
     };
 
     // precision
@@ -433,9 +433,6 @@ extern "C" {
        GGML_FTYPE_MOSTLY_IQ4_XS  = 22, // except 1d tensors
        GGML_FTYPE_MOSTLY_IQ1_M   = 23, // except 1d tensors
        GGML_FTYPE_MOSTLY_BF16    = 24, // except 1d tensors
-       GGML_FTYPE_MOSTLY_Q4_0_4_4 = 25, // except 1d tensors
-       GGML_FTYPE_MOSTLY_Q4_0_4_8 = 26, // except 1d tensors
-       GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27, // except 1d tensors
     };
 
     // available tensor operations:
|
|||||||
|
|
||||||
#ifdef __cplusplus
|
#ifdef __cplusplus
|
||||||
// restrict not standard in C++
|
// restrict not standard in C++
|
||||||
|
# if defined(__GNUC__)
|
||||||
|
# define GGML_RESTRICT __restrict__
|
||||||
|
# elif defined(__clang__)
|
||||||
|
# define GGML_RESTRICT __restrict
|
||||||
|
# elif defined(_MSC_VER)
|
||||||
|
# define GGML_RESTRICT __restrict
|
||||||
|
# else
|
||||||
# define GGML_RESTRICT
|
# define GGML_RESTRICT
|
||||||
|
# endif
|
||||||
#else
|
#else
|
||||||
# define GGML_RESTRICT restrict
|
# define GGML_RESTRICT restrict
|
||||||
#endif
|
#endif
|
||||||
|
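Context for the hunk above: C99 `restrict` does not exist in standard C++, but GCC, Clang, and MSVC all accept a vendor spelling, so the header now maps `GGML_RESTRICT` onto those instead of defining it away for C++ translation units. A self-contained illustration of the same idea follows; the `MY_RESTRICT` macro and `scale` function are ours, not ggml's.

```cpp
#include <cstddef>

#ifdef __cplusplus
#  if defined(__GNUC__) || defined(__clang__)
#    define MY_RESTRICT __restrict__
#  elif defined(_MSC_VER)
#    define MY_RESTRICT __restrict
#  else
#    define MY_RESTRICT
#  endif
#else
#  define MY_RESTRICT restrict  // plain C99
#endif

// Promising the compiler that dst and src never alias lets it vectorize freely,
// in both C and C++ builds of the same header.
static void scale(float * MY_RESTRICT dst, const float * MY_RESTRICT src, float s, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        dst[i] = s * src[i];
    }
}
```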
--- a/ggml/src/CMakeLists.txt
+++ b/ggml/src/CMakeLists.txt
@@ -220,9 +220,7 @@ add_library(ggml-base
             ggml-threading.cpp
             ggml-threading.h
             ggml-quants.c
-            ggml-quants.h
-            ggml-aarch64.c
-            ggml-aarch64.h)
+            ggml-quants.h)
 
 target_include_directories(ggml-base PRIVATE .)
 
--- a/ggml/src/ggml-aarch64.c
+++ /dev/null
@@ -1,129 +0,0 @@
-#define GGML_COMMON_DECL_C
-#include "ggml-common.h"
-
-#include "ggml-aarch64.h"
-#include "ggml-impl.h"
-#include "ggml-quants.h"
-#include <assert.h>
-
-#define UNUSED GGML_UNUSED
-
-static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) {
-    block_q4_0x4 out;
-
-    for (int i = 0; i < 4; i++) {
-        out.d[i] = in[i].d;
-    }
-
-    const int end = QK4_0 * 2 / blck_size_interleave;
-
-    if (blck_size_interleave == 8) {
-        const uint64_t xor_mask = 0x8888888888888888ULL;
-        for (int i = 0; i < end; ++i) {
-            int src_id = i % 4;
-            int src_offset = (i / 4) * blck_size_interleave;
-            int dst_offset = i * blck_size_interleave;
-
-            uint64_t elems;
-            // Using memcpy to avoid unaligned memory accesses
-            memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t));
-            elems ^= xor_mask;
-            memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t));
-        }
-    } else if (blck_size_interleave == 4) {
-        const uint32_t xor_mask = 0x88888888;
-        for (int i = 0; i < end; ++i) {
-            int src_id = i % 4;
-            int src_offset = (i / 4) * blck_size_interleave;
-            int dst_offset = i * blck_size_interleave;
-
-            uint32_t elems;
-            memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint32_t));
-            elems ^= xor_mask;
-            memcpy(&out.qs[dst_offset], &elems, sizeof(uint32_t));
-        }
-    } else {
-        GGML_ASSERT(false);
-    }
-
-    return out;
-}
-
-// interleave 8 block_q4_0s in blocks of blck_size_interleave
-// returns an interleaved block_q4_0x8
-// in the interleaved block_q4_0x8, place deltas for 8 block_q4_0 blocks
-// first, then interleave quants from 8 block_q4_0s in blocks of blck_size_interleave
-static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_interleave) {
-    block_q4_0x8 out;
-
-    for (int i = 0; i < 8; i++) {
-        out.d[i] = in[i].d;
-    }
-
-    const int end = QK4_0 * 4 / blck_size_interleave;
-    const uint64_t xor_mask = 0x8888888888888888ULL;
-
-    for (int i = 0; i < end; ++i) {
-        int src_id = i % 8;
-        int src_offset = (i / 8) * blck_size_interleave;
-        int dst_offset = i * blck_size_interleave;
-
-        uint64_t elems;
-        memcpy(&elems, &in[src_id].qs[src_offset], sizeof(uint64_t));
-        elems ^= xor_mask;
-        memcpy(&out.qs[dst_offset], &elems, sizeof(uint64_t));
-    }
-
-    return out;
-}
-
-static size_t quantize_q4_0_nr_bl(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, int nrows_interleaved, int blck_size_interleave) {
-    assert(n_per_row % QK4_0 == 0);
-    const int nb = n_per_row / QK4_0;
-
-    void * out_ptr = NULL;
-    if (nrows_interleaved == 8) {
-        out_ptr = (block_q4_0x8 *) dst;
-    }
-    else if (nrows_interleaved == 4) {
-        out_ptr = (block_q4_0x4 *) dst;
-    }
-    assert(nrows_interleaved <= 8);
-    block_q4_0 dst_tmp[8];
-
-    for (int b = 0; b < (nrow * n_per_row); b += nrows_interleaved * n_per_row) {
-
-        for (int64_t x = 0; x < nb; x++) {
-
-            for (int i = 0; i < nrows_interleaved; i++ ) {
-                quantize_row_q4_0_ref(src + b + i * n_per_row + x * QK4_0, (block_q4_0 *) dst_tmp + i, QK4_0);
-            }
-
-            if (nrows_interleaved == 8) {
-                *(block_q4_0x8 *) out_ptr = make_block_q4_0x8(dst_tmp, blck_size_interleave);
-                out_ptr = (block_q4_0x8 *) out_ptr + 1;
-            }
-            else if (nrows_interleaved == 4) {
-                *(block_q4_0x4 *) out_ptr = make_block_q4_0x4(dst_tmp, blck_size_interleave);
-                out_ptr = (block_q4_0x4 *) out_ptr + 1;
-            }
-        }
-    }
-
-    return ((nrow * n_per_row) / QK4_0 * sizeof(block_q4_0));
-}
-
-size_t quantize_q4_0_4x4(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
-    UNUSED(quant_weights);
-    return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 4);
-}
-
-size_t quantize_q4_0_4x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
-    UNUSED(quant_weights);
-    return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 4, 8);
-}
-
-size_t quantize_q4_0_8x8(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
-    UNUSED(quant_weights);
-    return quantize_q4_0_nr_bl(src, dst, nrow, n_per_row, 8, 8);
-}
--- a/ggml/src/ggml-aarch64.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-
-// GGML internal header
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Quantization utilizing an importance matrix (a.k.a. "Activation aWare Quantization")
-size_t quantize_q4_0_4x4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
-size_t quantize_q4_0_4x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
-size_t quantize_q4_0_8x8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
-
-#ifdef __cplusplus
-}
-#endif
-
--- a/ggml/src/ggml-cann/ggml-cann.cpp
+++ b/ggml/src/ggml-cann/ggml-cann.cpp
@@ -2089,7 +2089,7 @@ static void * ggml_backend_cann_reg_get_proc_address(ggml_backend_reg_t reg, con
 static const ggml_backend_reg_i ggml_backend_cann_reg_interface = {
     /* .get_name          = */ ggml_backend_cann_reg_get_name,
     /* .get_device_count  = */ ggml_backend_cann_reg_get_device_count,
-    /* .get_device_get    = */ ggml_backend_cann_reg_get_device,
+    /* .get_device        = */ ggml_backend_cann_reg_get_device,
     /* .get_proc_address  = */ ggml_backend_cann_reg_get_proc_address,
 };
 
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -6,7 +6,20 @@
 typedef uint16_t ggml_half;
 typedef uint32_t ggml_half2;
 
-#define GGML_COMMON_AGGR
+#define GGML_COMMON_AGGR_U
+#define GGML_COMMON_AGGR_S
+
+#define GGML_COMMON_DECL
+#elif defined(GGML_COMMON_DECL_CPP)
+#include <cstdint>
+
+typedef uint16_t ggml_half;
+typedef uint32_t ggml_half2;
+
+// std-c++ allow anonymous unions but some compiler warn on it
+#define GGML_COMMON_AGGR_U data
+// std-c++ do not allow it.
+#define GGML_COMMON_AGGR_S data
 
 #define GGML_COMMON_DECL
 #elif defined(GGML_COMMON_DECL_METAL)
@@ -15,7 +28,8 @@ typedef uint32_t ggml_half2;
 typedef half  ggml_half;
 typedef half2 ggml_half2;
 
-#define GGML_COMMON_AGGR
+#define GGML_COMMON_AGGR_U
+#define GGML_COMMON_AGGR_S
 
 #define GGML_COMMON_DECL
 #elif defined(GGML_COMMON_DECL_CUDA)
@@ -29,7 +43,8 @@ typedef half2 ggml_half2;
 typedef half  ggml_half;
 typedef half2 ggml_half2;
 
-#define GGML_COMMON_AGGR data
+#define GGML_COMMON_AGGR_U
+#define GGML_COMMON_AGGR_S data
 
 #define GGML_COMMON_DECL
 #elif defined(GGML_COMMON_DECL_HIP)
@@ -39,7 +54,8 @@ typedef half2 ggml_half2;
 typedef half  ggml_half;
 typedef half2 ggml_half2;
 
-#define GGML_COMMON_AGGR data
+#define GGML_COMMON_AGGR_U
+#define GGML_COMMON_AGGR_S data
 
 #define GGML_COMMON_DECL
 #elif defined(GGML_COMMON_DECL_SYCL)
@@ -49,7 +65,8 @@ typedef half2 ggml_half2;
 typedef sycl::half  ggml_half;
 typedef sycl::half2 ggml_half2;
 
-#define GGML_COMMON_AGGR data
+#define GGML_COMMON_AGGR_U
+#define GGML_COMMON_AGGR_S data
 
 #define GGML_COMMON_DECL
 #endif
@@ -154,9 +171,9 @@ typedef struct {
         struct {
             ggml_half d; // delta
             ggml_half m; // min
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 dm;
-    };
+    } GGML_COMMON_AGGR_U;
     uint8_t qs[QK4_1 / 2]; // nibbles / quants
 } block_q4_1;
 static_assert(sizeof(block_q4_1) == 2 * sizeof(ggml_half) + QK4_1 / 2, "wrong q4_1 block size/padding");
@@ -175,9 +192,9 @@ typedef struct {
         struct {
             ggml_half d; // delta
             ggml_half m; // min
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 dm;
-    };
+    } GGML_COMMON_AGGR_U;
     uint8_t qh[4];         // 5-th bit of quants
     uint8_t qs[QK5_1 / 2]; // nibbles / quants
 } block_q5_1;
@@ -196,37 +213,13 @@ typedef struct {
         struct {
             ggml_half d; // delta
             ggml_half s; // d * sum(qs[i])
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 ds;
-    };
+    } GGML_COMMON_AGGR_U;
     int8_t qs[QK8_1]; // quants
 } block_q8_1;
 static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_half) + QK8_1, "wrong q8_1 block size/padding");
 
-typedef struct {
-    ggml_half d[4];        // deltas for 4 q4_0 blocks
-    uint8_t qs[QK4_0 * 2]; // nibbles / quants for 4 q4_0 blocks
-} block_q4_0x4;
-static_assert(sizeof(block_q4_0x4) == 4 * sizeof(ggml_half) + QK4_0 * 2, "wrong q4_0x4 block size/padding");
-
-typedef struct {
-    ggml_half d[8];        // deltas for 8 q4_0 blocks
-    uint8_t qs[QK4_0 * 4]; // nibbles / quants for 8 q4_0 blocks
-} block_q4_0x8;
-static_assert(sizeof(block_q4_0x8) == 8 * sizeof(ggml_half) + QK4_0 * 4, "wrong q4_0x8 block size/padding");
-
-typedef struct {
-    ggml_half d[4];        // deltas for 4 q8_0 blocks
-    int8_t qs[QK8_0 * 4];  // quants for 4 q8_0 blocks
-} block_q8_0x4;
-static_assert(sizeof(block_q8_0x4) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong q8_0x4 block size/padding");
-
-typedef struct {
-    ggml_half d[8];        // deltas for 8 q8_0 blocks
-    int8_t qs[QK8_0 * 8];  // quants for 8 q8_0 blocks
-} block_q8_0x8;
-static_assert(sizeof(block_q8_0x8) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong q8_0x8 block size/padding");
-
 //
 // Ternary quantization
 //
@@ -261,9 +254,9 @@ typedef struct {
         struct {
             ggml_half d;    // super-block scale for quantized scales
             ggml_half dmin; // super-block scale for quantized mins
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 dm;
-    };
+    } GGML_COMMON_AGGR_U;
 } block_q2_K;
 static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_half) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");
 
@@ -288,9 +281,9 @@ typedef struct {
         struct {
             ggml_half d;    // super-block scale for quantized scales
             ggml_half dmin; // super-block scale for quantized mins
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 dm;
-    };
+    } GGML_COMMON_AGGR_U;
     uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
     uint8_t qs[QK_K/2];           // 4--bit quants
 } block_q4_K;
@@ -305,9 +298,9 @@ typedef struct {
         struct {
             ggml_half d;    // super-block scale for quantized scales
             ggml_half dmin; // super-block scale for quantized mins
-        } GGML_COMMON_AGGR;
+        } GGML_COMMON_AGGR_S;
         ggml_half2 dm;
-    };
+    } GGML_COMMON_AGGR_U;
     uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
     uint8_t qh[QK_K/8];           // quants, high bit
     uint8_t qs[QK_K/2];           // quants, low 4 bits
@@ -418,12 +411,6 @@ typedef struct {
 } block_iq4_xs;
 static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/64 + QK_K/2, "wrong iq4_xs block size/padding");
 
-typedef struct {
-    ggml_half d[4];         // deltas for 4 iq4_nl blocks
-    uint8_t qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks
-} block_iq4_nlx4;
-static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding");
-
 #endif // GGML_COMMON_DECL
 #endif // GGML_COMMON_DECL
 
@@ -437,6 +424,13 @@ static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wro
 #define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = {
 #define GGML_TABLE_END() };
 
+#define GGML_COMMON_IMPL
+#elif defined(GGML_COMMON_IMPL_CPP)
+#include <cstdint>
+
+#define GGML_TABLE_BEGIN(type, name, size) static const type name[size] = {
+#define GGML_TABLE_END() };
+
 #define GGML_COMMON_IMPL
 #elif defined(GGML_COMMON_IMPL_METAL)
 #include <metal_stdlib>
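Background for the AGGR_U/AGGR_S renaming above: the quant block structs overlay a `struct { d; m; }` scale pair with a packed `ggml_half2` through a union. C11 permits both the union and the inner struct to be anonymous (so plain C accesses `x.d` and `x.dm` directly), but standard C++ does not allow anonymous structs and some compilers warn even on anonymous unions, so each language branch now supplies its own member names through the two macros. A hedged, self-contained illustration of the layout trick itself; the names here are ours, not ggml's:

```cpp
#include <cstdint>

using half_bits = uint16_t;  // stand-in for ggml_half (storage-only fp16)

struct block_like {
    union {
        struct {
            half_bits d;     // delta
            half_bits m;     // min
        } scales;            // named inner struct: valid in both C11 and C++
        uint32_t dm;         // the same 4 bytes viewed as one packed value
    } agg;                   // named union member, the role GGML_COMMON_AGGR_U plays
    uint8_t qs[16];          // nibbles / quants (QK4_1 / 2 with QK4_1 == 32)
};

// naming the members does not change the layout:
static_assert(sizeof(block_like) == 2 * sizeof(half_bits) + 16, "wrong block size/padding");
```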
--- a/ggml/src/ggml-cpu/CMakeLists.txt
+++ b/ggml/src/ggml-cpu/CMakeLists.txt
@@ -10,10 +10,14 @@ function(ggml_add_cpu_backend_variant_impl tag_name)
     list (APPEND GGML_CPU_SOURCES
         ggml-cpu/ggml-cpu.c
         ggml-cpu/ggml-cpu.cpp
-        ggml-cpu/ggml-cpu-aarch64.c
+        ggml-cpu/ggml-cpu-aarch64.cpp
         ggml-cpu/ggml-cpu-aarch64.h
+        ggml-cpu/ggml-cpu-hbm.cpp
+        ggml-cpu/ggml-cpu-hbm.h
         ggml-cpu/ggml-cpu-quants.c
         ggml-cpu/ggml-cpu-quants.h
+        ggml-cpu/ggml-cpu-traits.cpp
+        ggml-cpu/ggml-cpu-traits.h
         ggml-cpu/amx/amx.cpp
         ggml-cpu/amx/amx.h
         ggml-cpu/amx/mmq.cpp
--- a/ggml/src/ggml-cpu/amx/amx.cpp
+++ b/ggml/src/ggml-cpu/amx/amx.cpp
@@ -5,6 +5,7 @@
 #include "ggml-backend.h"
 #include "ggml-impl.h"
 #include "ggml-cpu.h"
+#include "ggml-cpu-traits.h"
 
 #if defined(__gnu_linux__)
 #include <sys/syscall.h>
@@ -17,6 +18,29 @@
 
 #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
 
+// AMX type_trais
+namespace ggml::cpu::amx {
+class tensor_traits : public ggml::cpu::tensor_traits {
+    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
+        size = ggml_backend_amx_desired_wsize(op);
+        return true;
+    }
+
+    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
+        if (op->op == GGML_OP_MUL_MAT) {
+            ggml_backend_amx_mul_mat(params, op);
+            return true;
+        }
+        return false;
+    }
+};
+
+static ggml::cpu::tensor_traits * get_tensor_traits(ggml_backend_buffer_t, struct ggml_tensor *) {
+    static tensor_traits traits;
+    return &traits;
+}
+}  // namespace ggml::cpu::amx
+
 // AMX buffer interface
 static void ggml_backend_amx_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     free(buffer->context);
@@ -26,14 +50,23 @@ static void * ggml_backend_amx_buffer_get_base(ggml_backend_buffer_t buffer) {
     return (void *) (buffer->context);
 }
 
-static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+static void ggml_backend_amx_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+    tensor->extra = (void *) ggml::cpu::amx::get_tensor_traits(buffer, tensor);
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_amx_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
+                                                  uint8_t value, size_t offset, size_t size) {
     memset((char *) tensor->data + offset, value, size);
 
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
+                                               const void * data, size_t offset, size_t size) {
     if (qtype_has_amx_kernels(tensor->type)) {
+        GGML_LOG_DEBUG("%s: amx repack tensor %s of type %s\n", __func__, tensor->name, ggml_type_name(tensor->type));
         ggml_backend_amx_convert_weight(tensor, data, offset, size);
     } else {
         memcpy((char *) tensor->data + offset, data, size);
@@ -42,6 +75,8 @@ static void ggml_backend_amx_buffer_set_tensor(ggml_backend_buffer_t buffer, str
     GGML_UNUSED(buffer);
 }
 
+/*
+// need to figure what we need to do with buffer->extra.
 static void ggml_backend_amx_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
     GGML_ASSERT(!qtype_has_amx_kernels(tensor->type));
     memcpy(data, (const char *)tensor->data + offset, size);
@@ -62,6 +97,7 @@ static bool ggml_backend_amx_buffer_cpy_tensor(ggml_backend_buffer_t buffer, con
 
     GGML_UNUSED(buffer);
 }
+*/
 
 static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
     memset(buffer->context, value, buffer->size);
@@ -70,13 +106,13 @@ static void ggml_backend_amx_buffer_clear(ggml_backend_buffer_t buffer, uint8_t
 static ggml_backend_buffer_i ggml_backend_amx_buffer_interface = {
     /* .free_buffer     = */ ggml_backend_amx_buffer_free_buffer,
     /* .get_base        = */ ggml_backend_amx_buffer_get_base,
-    /* .init_tensor     = */ NULL, // no initialization required
+    /* .init_tensor     = */ ggml_backend_amx_buffer_init_tensor,
     /* .memset_tensor   = */ ggml_backend_amx_buffer_memset_tensor,
     /* .set_tensor      = */ ggml_backend_amx_buffer_set_tensor,
-    /* .get_tensor      = */ ggml_backend_amx_buffer_get_tensor,
+    /* .get_tensor      = */ nullptr,
-    /* .cpy_tensor      = */ ggml_backend_amx_buffer_cpy_tensor,
+    /* .cpy_tensor      = */ nullptr,
     /* .clear           = */ ggml_backend_amx_buffer_clear,
-    /* .reset           = */ NULL,
+    /* .reset           = */ nullptr,
 };
 
 static const char * ggml_backend_amx_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
@@ -101,14 +137,44 @@ static size_t ggml_backend_amx_buffer_type_get_alignment(ggml_backend_buffer_typ
     GGML_UNUSED(buft);
 }
 
-static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor* tensor) {
-    return ggml_backend_amx_get_alloc_size(tensor);
+namespace ggml::cpu::amx {
+class extra_buffer_type : ggml::cpu::extra_buffer_type {
+    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
+        // handle only 2d gemm for now
+        auto is_contiguous_2d = [](const struct ggml_tensor * t) {
+            return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
+        };
 
-    GGML_UNUSED(buft);
+        if (op->op == GGML_OP_MUL_MAT && is_contiguous_2d(op->src[0]) &&  // src0 must be contiguous
+            is_contiguous_2d(op->src[1]) &&                               // src1 must be contiguous
+            op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_amx_buffer_type() &&
+            op->ne[0] % (TILE_N * 2) == 0 &&                              // out_features is 32x
+            (qtype_has_amx_kernels(op->src[0]->type) || (op->src[0]->type == GGML_TYPE_F16))) {
+            // src1 must be host buffer
+            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
+                return false;
+            }
+            // src1 must be float32
+            if (op->src[1]->type == GGML_TYPE_F32) {
+                return true;
+            }
+        }
+        return false;
     }
 
-static bool ggml_backend_amx_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
-    return false;
+    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
+        if (op->op == GGML_OP_MUL_MAT && op->src[0]->buffer &&
+            op->src[0]->buffer->buft == ggml_backend_amx_buffer_type()) {
+            return (ggml::cpu::tensor_traits *) op->src[0]->extra;
+        }
+
+        return nullptr;
+    }
+};
+}  // namespace ggml::cpu::amx
+
+static size_t ggml_backend_amx_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+    return ggml_backend_amx_get_alloc_size(tensor);
 
     GGML_UNUSED(buft);
 }
@@ -129,68 +195,26 @@ static bool ggml_amx_init() {
     return true;
 #endif
 }
 
 ggml_backend_buffer_type_t ggml_backend_amx_buffer_type() {
     static struct ggml_backend_buffer_type ggml_backend_buffer_type_amx = {
         /* .iface = */ {
             /* .get_name         = */ ggml_backend_amx_buffer_type_get_name,
             /* .alloc_buffer     = */ ggml_backend_amx_buffer_type_alloc_buffer,
             /* .get_alignment    = */ ggml_backend_amx_buffer_type_get_alignment,
-            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
+            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
             /* .get_alloc_size   = */ ggml_backend_amx_buffer_type_get_alloc_size,
-            /* .is_host          = */ ggml_backend_amx_buffer_type_is_host,
+            /* .is_host          = */ nullptr,
         },
         /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */ NULL,
+        /* .context = */ new ggml::cpu::amx::extra_buffer_type(),
     };
 
     if (!ggml_amx_init()) {
-        return NULL;
+        return nullptr;
     }
 
     return &ggml_backend_buffer_type_amx;
 }
-
-bool ggml_backend_amx_buft_is_amx(ggml_backend_buffer_type_t buft) {
-    return buft->iface.get_name == ggml_backend_amx_buffer_type_get_name;
-}
-
-bool ggml_backend_amx_device_supports_op(const struct ggml_tensor * op) {
-    // handle only 2d gemm for now
-    auto is_contiguous_2d = [](const struct ggml_tensor * t) {
-        return ggml_is_contiguous(t) && t->ne[3] == 1 && t->ne[2] == 1;
-    };
-
-    switch (op->op) {
-        case GGML_OP_NONE:
-        case GGML_OP_RESHAPE:
-        case GGML_OP_VIEW:
-        case GGML_OP_PERMUTE:
-        case GGML_OP_TRANSPOSE:
-            return true;
-
-        case GGML_OP_MUL_MAT: {
-            const struct ggml_tensor * src0 = op->src[0];
-            const struct ggml_tensor * src1 = op->src[1];
-
-            const enum ggml_type type = src0->type;
-            const int64_t ne0 = op->ne[0];
-
-            // amx kernels enables for Q4_0, Q4_1, Q8_0, F16
-            // Q4_K, Q5_K, Q6_K, IQ4_XS enabled for QK_K = 256
-            bool has_amx_kernels = qtype_has_amx_kernels(type) || (type == GGML_TYPE_F16);
-
-            bool can_use_amx =
-                is_contiguous_2d(src0) &&       // src0 must be contiguous
-                is_contiguous_2d(src1) &&       // src1 must be contiguous
-                src1->type == GGML_TYPE_F32 &&  // src1 must be float32
-                has_amx_kernels &&              // with amx kernel impls
-                ne0 % (TILE_N * 2) == 0;        // out_features is 32x
-
-            return can_use_amx;
-        }
-        default:
-            return false;
-    }
-}
-
 #endif // defined(__AMX_INT8__) && defined(__AVX512VNNI__)
--- a/ggml/src/ggml-cpu/amx/amx.h
+++ b/ggml/src/ggml-cpu/amx/amx.h
@@ -1,20 +1,8 @@
 #include "ggml-backend.h"
 #include "ggml-cpu-impl.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+// GGML internal header
 
 #if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
 
 ggml_backend_buffer_type_t ggml_backend_amx_buffer_type(void);
-bool ggml_backend_amx_buft_is_amx(ggml_backend_buffer_type_t buft);
-bool ggml_backend_amx_device_supports_op(const struct ggml_tensor * op);
-void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
-size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst);
 
 #endif
-
-#ifdef __cplusplus
-}
-#endif
--- a/ggml/src/ggml-cpu/amx/common.h
+++ b/ggml/src/ggml-cpu/amx/common.h
@@ -7,7 +7,7 @@
 #include <memory>
 #include <type_traits>
 
-#if defined(_OPENMP)
+#if defined(GGML_USE_OPENMP)
 #include <omp.h>
 #endif
 
@@ -56,11 +56,11 @@ inline void balance211(T n, T nth, T ith, T& n_start, T& n_end) {
 }
 
 template <typename func_t>
-inline void parallel_for(int nth, int n, const func_t& f) {
-#if defined(_OPENMP)
-#pragma omp parallel num_threads(nth)
+inline void parallel_for(int n, const func_t& f) {
+#if defined(GGML_USE_OPENMP)
+#pragma omp parallel
 {
-    //int nth = omp_get_num_threads();
+    int nth = omp_get_num_threads();
     int ith = omp_get_thread_num();
     int tbegin, tend;
     balance211(n, nth, ith, tbegin, tend);
@@ -68,8 +68,6 @@ inline void parallel_for(int nth, int n, const func_t& f) {
 }
 #else
     f(0, n);
-
-    GGML_UNUSED(nth);
 #endif
 }
 
@@ -91,10 +89,3 @@ inline bool qtype_has_amx_kernels(const enum ggml_type type) {
         (type == GGML_TYPE_Q6_K) ||
         (type == GGML_TYPE_IQ4_XS);
 }
-
-// ggml backend context
-struct ggml_backend_amx_context {
-    int n_threads = GGML_DEFAULT_N_THREADS;
-    std::unique_ptr<char[]> work_data;
-    size_t work_size = 0;
-};
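On the `parallel_for` change above: the explicit `nth` parameter and `num_threads(nth)` clause are gone; inside a plain `#pragma omp parallel` region the OpenMP runtime decides the team size and each thread asks for it with `omp_get_num_threads()`, which also removes the need for the `ggml_backend_amx_context` thread bookkeeping deleted here. A standalone sketch of the same pattern, with the commit's `balance211` partitioning replaced by an inline static split:

```cpp
#if defined(GGML_USE_OPENMP)  // the flag this commit switches to
#include <omp.h>
#endif

template <typename F>
inline void parallel_for(int n, const F & f) {
#if defined(GGML_USE_OPENMP)
    #pragma omp parallel
    {
        int nth = omp_get_num_threads();    // team size chosen by the runtime
        int ith = omp_get_thread_num();
        // static partition of [0, n) into nth contiguous chunks
        int begin = (n * ith) / nth;
        int end   = (n * (ith + 1)) / nth;
        f(begin, end);
    }
#else
    f(0, n);                                // single-threaded fallback
#endif
}
```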
--- a/ggml/src/ggml-cpu/amx/mmq.cpp
+++ b/ggml/src/ggml-cpu/amx/mmq.cpp
@@ -18,10 +18,6 @@
 #include <unistd.h>
 #endif
 
-#if defined(_OPENMP)
-#include <omp.h>
-#endif
-
 #if (defined(_WIN32) || defined(_WIN64))
 #define RESTRICT __restrict
 #else
@@ -1382,13 +1378,13 @@ struct tinygemm_kernel_avx<float, ggml_fp16_t, float, BLOCK_M, BLOCK_N, BLOCK_K>
 #define PACKED_INDEX(n, k, KB, tile_size) (n * KB + k) * tile_size
 
 template<typename TB, int BLOCK_K>
-void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K, int n_threads) {
+void convert_B_packed_format(void * RESTRICT packed_B, const TB * RESTRICT B, int N, int K) {
     const int NB = N / TILE_N;
     const int KB = K / BLOCK_K;
     const int TILE_SIZE = get_tile_size<TB>();
 
     // parallel on NB should be enough
-    parallel_for(n_threads, NB, [&](int begin, int end) {
+    parallel_for(NB, [&](int begin, int end) {
         for (int n = begin; n < end; ++n) {
             for (int k = 0; k < KB; ++k) {
                 int n0 = n * TILE_N;
@@ -2334,15 +2330,8 @@ void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * d
     const int K = tensor->ne[0];  // ne0: in_features
     const int N = tensor->ne[1];  // ne1: out_features
 
-#if defined(_OPENMP)
-    // the buffer ctx is not initialized when .set_tensor is called
-    int n_threads = omp_get_num_threads();
-#else
-    int n_threads = 1;
-#endif
-
     GGML_DISPATCH_QTYPES(TYPE, [&] {
-        convert_B_packed_format<type, blck_size>((void *)((char *)tensor->data + offset), (const type *)data, N, K, n_threads);
+        convert_B_packed_format<type, blck_size>((void *)((char *)tensor->data + offset), (const type *)data, N, K);
     });
 }
 
--- a/ggml/src/ggml-cpu/amx/mmq.h
+++ b/ggml/src/ggml-cpu/amx/mmq.h
@@ -1,16 +1,10 @@
 #pragma once
 #include "common.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+size_t ggml_backend_amx_desired_wsize(const struct ggml_tensor * dst);
 
 size_t ggml_backend_amx_get_alloc_size(const struct ggml_tensor * tensor);
 
 void ggml_backend_amx_convert_weight(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
 
 void ggml_backend_amx_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
-
-#ifdef __cplusplus
-}
-#endif
--- a/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
+++ b/ggml/src/ggml-cpu/ggml-cpu-aarch64.cpp
@@ -1,20 +1,57 @@
-#define GGML_COMMON_IMPL_C
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
 #include "ggml-common.h"
+#include "ggml-backend-impl.h"
 
 #include "ggml-quants.h"
 #include "ggml-impl.h"
 #include "ggml-cpu.h"
-#include "ggml-cpu/ggml-cpu-impl.h"
+#include "ggml-cpu-impl.h"
+#include "ggml-cpu-traits.h"
 
-#include <math.h>
-#include <string.h>
-#include <assert.h>
-#include <float.h>
-#include <stdlib.h> // for qsort
-#include <stdio.h>  // for GGML_ASSERT
+#include <cmath>
+#include <cstring>
+#include <cassert>
+#include <cfloat>
+#include <cstdlib> // for qsort
+#include <cstdio>  // for GGML_ASSERT
 
 #include "ggml-cpu-aarch64.h"
 
+// TODO: move to include file?
+template <int K> constexpr int QK_0() {
+    if constexpr (K == 4) {
+        return QK4_0;
+    }
+    if constexpr (K == 8) {
+        return QK8_0;
+    }
+    return -1;
+}
+
+template <int K, int N> struct block {
+    ggml_half d[N];                        // deltas for N qK_0 blocks
+    int8_t    qs[(QK_0<K>() * N * K) / 8]; // quants for N qK_0 blocks
+};
+
+// control size
+static_assert(sizeof(block<4, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 2, "wrong block<4,4> size/padding");
+static_assert(sizeof(block<4, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<4,8> size/padding");
+static_assert(sizeof(block<8, 4>) == 4 * sizeof(ggml_half) + QK8_0 * 4, "wrong block<8,4> size/padding");
+static_assert(sizeof(block<8, 8>) == 8 * sizeof(ggml_half) + QK8_0 * 8, "wrong block<8,8> size/padding");
+
+using block_q4_0x4 = block<4, 4>;
+using block_q4_0x8 = block<4, 8>;
+using block_q8_0x4 = block<8, 4>;
+using block_q8_0x8 = block<8, 8>;
+
+struct block_iq4_nlx4 {
+    ggml_half d[4];           // deltas for 4 iq4_nl blocks
+    uint8_t   qs[QK4_NL * 2]; // nibbles / quants for 4 iq4_nl blocks
+};
+
+static_assert(sizeof(block_iq4_nlx4) == 4 * sizeof(ggml_half) + QK4_NL * 2, "wrong iq4_nlx4 block size/padding");
+
 #if defined(__GNUC__)
 #pragma GCC diagnostic ignored "-Woverlength-strings"
 #elif defined(_MSC_VER)
|
|||||||
|
|
||||||
static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
|
static const int8_t kvalues_iq4nl[16] = {-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113};
|
||||||
|
|
||||||
static void quantize_q8_0_4x4(const float * restrict x, void * restrict vy, int64_t k) {
|
static void quantize_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
|
||||||
assert(QK8_0 == 32);
|
assert(QK8_0 == 32);
|
||||||
assert(k % QK8_0 == 0);
|
assert(k % QK8_0 == 0);
|
||||||
const int nb = k / QK8_0;
|
const int nb = k / QK8_0;
|
||||||
|
|
||||||
block_q8_0x4 * restrict y = (block_q8_0x4 *) vy;
|
block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
|
||||||
|
|
||||||
#if defined(__ARM_NEON)
|
#if defined(__ARM_NEON)
|
||||||
float32x4_t srcv[4][8];
|
float32x4_t srcv[4][8];
|
||||||
@ -279,12 +316,12 @@ static void quantize_q8_0_4x4(const float * restrict x, void * restrict vy, int6
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
static void quantize_q8_0_4x8(const float * restrict x, void * restrict vy, int64_t k) {
|
static void quantize_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
|
||||||
assert(QK8_0 == 32);
|
assert(QK8_0 == 32);
|
||||||
assert(k % QK8_0 == 0);
|
assert(k % QK8_0 == 0);
|
||||||
const int nb = k / QK8_0;
|
const int nb = k / QK8_0;
|
||||||
|
|
||||||
block_q8_0x4 * restrict y = (block_q8_0x4 *) vy;
|
block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
|
||||||
|
|
||||||
#if defined(__ARM_NEON)
|
#if defined(__ARM_NEON)
|
||||||
float32x4_t srcv[4][8];
|
float32x4_t srcv[4][8];
|
||||||
@ -494,7 +531,7 @@ static void quantize_q8_0_4x8(const float * restrict x, void * restrict vy, int6
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
void quantize_mat_q8_0(const float * restrict x, void * restrict vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) {
|
static void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t nrow, int64_t n_per_row, int64_t blck_size_interleave) {
|
||||||
assert(nrow == 4);
|
assert(nrow == 4);
|
||||||
UNUSED(nrow);
|
UNUSED(nrow);
|
||||||
if (blck_size_interleave == 4) {
|
if (blck_size_interleave == 4) {
|
||||||
@ -506,7 +543,7 @@ void quantize_mat_q8_0(const float * restrict x, void * restrict vy, int64_t nro
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -591,7 +628,7 @@ void ggml_gemv_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemv_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -701,7 +738,7 @@ void ggml_gemv_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 8;
@@ -974,7 +1011,7 @@ void ggml_gemv_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -1070,7 +1107,7 @@ void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * restrict s, size_t bs, const void
     }
 }
 
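All four gemv kernels above compute the same contraction and differ only in how the interleaved weights are walked. A hedged scalar model of one output column, with the interleaving stripped away (layout and names simplified; q4_0 stores elements 0-15 in the low nibbles and 16-31 in the high nibbles, biased by +8):

    #include <cstdint>

    static float gemv_q4_0_q8_0_column_sketch(int nblocks,
                                              const uint8_t * q4,   // nblocks * 16 packed nibble pairs
                                              const float   * d4,   // nblocks weight scales
                                              const int8_t  * q8,   // nblocks * 32 activations
                                              const float   * d8) { // nblocks activation scales
        float acc = 0.0f;
        for (int b = 0; b < nblocks; b++) {
            int32_t isum = 0;
            for (int j = 0; j < 16; j++) {
                const int w0 = (q4[b * 16 + j] & 0x0F) - 8;   // element j
                const int w1 = (q4[b * 16 + j] >> 4)   - 8;   // element j + 16
                isum += w0 * q8[b * 32 + j] + w1 * q8[b * 32 + j + 16];
            }
            acc += (float) isum * d4[b] * d8[b];  // per-block scale product
        }
        return acc;
    }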
-void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -1586,7 +1623,7 @@ void ggml_gemm_q4_0_4x4_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemm_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -2040,7 +2077,7 @@ void ggml_gemm_q4_0_4x8_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 8;
@@ -2560,31 +2597,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
             const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31)
 
             // Shuffle pattern one - right side input
-            const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3)
-            const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3)
+            const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3)
+            const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3)
 
-            const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11)
-            const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11)
+            const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11)
+            const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11)
 
-            const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19)
-            const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19)
+            const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19)
+            const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19)
 
-            const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27)
-            const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27)
+            const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27)
+            const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27)
 
             // Shuffle pattern two - right side input
 
-            const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7)
-            const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7)
+            const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7)
+            const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7)
 
-            const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15)
-            const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15)
+            const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15)
+            const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15)
 
-            const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23)
-            const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23)
+            const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23)
+            const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23)
 
-            const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31)
-            const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31)
+            const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31)
+            const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31)
 
             // Scale values - Load the weight scale values of two block_q4_0x8
             const __m512 col_scale_f32 = GGML_F32Cx8x2_LOAD(b_ptr_0[b].d, b_ptr_1[b].d);
@@ -2618,31 +2655,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
 
                 // Shuffle pattern one - left side input
 
-                const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3)
-                const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3)
+                const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3)
+                const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3)
 
-                const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11)
-                const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11)
+                const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11)
+                const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11)
 
-                const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19)
-                const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19)
+                const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19)
+                const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19)
 
-                const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27)
-                const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27)
+                const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27)
+                const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27)
 
                 // Shuffle pattern two - left side input
 
-                const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7)
-                const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7)
+                const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7)
+                const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7)
 
-                const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15)
-                const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15)
+                const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15)
+                const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15)
 
-                const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23)
-                const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23)
+                const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23)
+                const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23)
 
-                const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31)
-                const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31)
+                const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31)
+                const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31)
 
                 // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane
                 // Resembles MMLAs into 2x2 matrices in ARM Version
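The bulk of these hunks only casts the _mm512_shuffle_epi32 immediates to _MM_PERM_ENUM, which the build needs now that this file compiles as C++ (the intrinsics headers type the control operand as an enum there). For reference, the immediates decode as two selector bits per 32-bit element, least-significant pair first (standard shuffle semantics, explanatory only):

    //   136 = 0b10'00'10'00 -> picks {0, 2, 0, 2}  (pattern one: even elements)
    //   221 = 0b11'01'11'01 -> picks {1, 3, 1, 3}  (pattern two: odd elements)
    //   160 = 0b10'10'00'00 -> picks {0, 0, 2, 2}
    //   245 = 0b11'11'01'01 -> picks {1, 1, 3, 3}
    //    78 = 0b01'00'11'10 -> picks {2, 3, 0, 1}  (swaps the 64-bit halves)
    constexpr int shuffle_pick(int imm, int i) { return (imm >> (2 * i)) & 3; }
    static_assert(shuffle_pick(136, 1) == 2, "element 1 comes from source element 2");
    static_assert(shuffle_pick(78, 0)  == 2, "element 0 comes from source element 2");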
@@ -2671,10 +2708,10 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
 
 
                     // Straighten out to make 4 row vectors
-                    __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, 78));
-                    __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01);
-                    __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, 78));
-                    __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11);
+                    __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01);
+                    __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78));
+                    __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11);
 
                     // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes
                     const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptrs[rp][b].d), loadMask), 68);
@@ -2753,31 +2790,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
             const __m512i rhs_mat_2367ABEF_3 = _mm512_shuffle_epi8(signextendlutexpanded, _mm512_and_si512(_mm512_srli_epi16(rhs_raw_mat_2367ABEF_1, 4), m4bexpanded)); //B2(24-31) B3(24-31) B6(24-31) B7(24-31) BA(24-31) BB(24-31) BE(24-31) BF(24-31)
 
             // Shuffle pattern one - right side input
-            const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3)
-            const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3)
+            const __m512i rhs_mat_014589CD_0_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)136); //B0(0-3) B1(0-3) B0(0-3) B1(0-3) B4(0-3) B5(0-3) B4(0-3) B5(0-3) B8(0-3) B9(0-3) B8(0-3) B9(0-3) BC(0-3) BD(0-3) BC(0-3) BD(0-3)
+            const __m512i rhs_mat_2367ABEF_0_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)136); //B2(0-3) B3(0-3) B2(0-3) B3(0-3) B6(0-3) B7(0-3) B6(0-3) B7(0-3) BA(0-3) BB(0-3) BA(0-3) BB(0-3) BE(0-3) BF(0-3) BE(0-3) BF(0-3)
 
-            const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11)
-            const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11)
+            const __m512i rhs_mat_014589CD_1_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)136); //B0(8-11) B1(8-11) B0(8-11) B1(8-11) B4(8-11) B5(8-11) B4(8-11) B5(8-11) B8(8-11) B9(8-11) B8(8-11) B9(8-11) BC(8-11) BD(8-11) BC(8-11) BD(8-11)
+            const __m512i rhs_mat_2367ABEF_1_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)136); //B2(8-11) B3(8-11) B2(8-11) B3(8-11) B6(8-11) B7(8-11) B6(8-11) B7(8-11) BA(8-11) BB(8-11) BA(8-11) BB(8-11) BE(8-11) BF(8-11) BE(8-11) BF(8-11)
 
-            const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19)
-            const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19)
+            const __m512i rhs_mat_014589CD_2_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)136); //B0(16-19) B1(16-19) B0(16-19) B1(16-19) B4(16-19) B5(16-19) B4(16-19) B5(16-19) B8(16-19) B9(16-19) B8(16-19) B9(16-19) BC(16-19) BD(16-19) BC(16-19) BD(16-19)
+            const __m512i rhs_mat_2367ABEF_2_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)136); //B2(16-19) B3(16-19) B2(16-19) B3(16-19) B6(16-19) B7(16-19) B6(16-19) B7(16-19) BA(16-19) BB(16-19) BA(16-19) BB(16-19) BE(16-19) BF(16-19) BE(16-19) BF(16-19)
 
-            const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27)
-            const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27)
+            const __m512i rhs_mat_014589CD_3_sp1 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)136); //B0(24-27) B1(24-27) B0(24-27) B1(24-27) B4(24-27) B5(24-27) B4(24-27) B5(24-27) B8(24-27) B9(24-27) B8(24-27) B9(24-27) BC(24-27) BD(24-27) BC(24-27) BD(24-27)
+            const __m512i rhs_mat_2367ABEF_3_sp1 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)136); //B2(24-27) B3(24-27) B2(24-27) B3(24-27) B6(24-27) B7(24-27) B6(24-27) B7(24-27) BA(24-27) BB(24-27) BA(24-27) BB(24-27) BE(24-27) BF(24-27) BE(24-27) BF(24-27)
 
             // Shuffle pattern two - right side input
 
-            const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, 221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7)
-            const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, 221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7)
+            const __m512i rhs_mat_014589CD_0_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_0, (_MM_PERM_ENUM)221); //B0(4-7) B1(4-7) B0(4-7) B1(4-7) B4(4-7) B5(4-7) B4(4-7) B5(4-7) B8(4-7) B9(4-7) B8(4-7) B9(4-7) BC(4-7) BD(4-7) BC(4-7) BD(4-7)
+            const __m512i rhs_mat_2367ABEF_0_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_0, (_MM_PERM_ENUM)221); //B2(4-7) B3(4-7) B2(4-7) B3(4-7) B6(4-7) B7(4-7) B6(4-7) B7(4-7) BA(4-7) BB(4-7) BA(4-7) BB(4-7) BE(4-7) BF(4-7) BE(4-7) BF(4-7)
 
-            const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, 221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15)
-            const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, 221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15)
+            const __m512i rhs_mat_014589CD_1_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_1, (_MM_PERM_ENUM)221); //B0(12-15) B1(12-15) B0(12-15) B1(12-15) B4(12-15) B5(12-15) B4(12-15) B5(12-15) B8(12-15) B9(12-15) B8(12-15) B9(12-15) BC(12-15) BD(12-15) BC(12-15) BD(12-15)
+            const __m512i rhs_mat_2367ABEF_1_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_1, (_MM_PERM_ENUM)221); //B2(12-15) B3(12-15) B2(12-15) B3(12-15) B6(12-15) B7(12-15) B6(12-15) B7(12-15) BA(12-15) BB(12-15) BA(12-15) BB(12-15) BE(12-15) BF(12-15) BE(12-15) BF(12-15)
 
-            const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, 221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23)
-            const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, 221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23)
+            const __m512i rhs_mat_014589CD_2_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_2, (_MM_PERM_ENUM)221); //B0(20-23) B1(20-23) B0(20-23) B1(20-23) B4(20-23) B5(20-23) B4(20-23) B5(20-23) B8(20-23) B9(20-23) B8(20-23) B9(20-23) BC(20-23) BD(20-23) BC(20-23) BD(20-23)
+            const __m512i rhs_mat_2367ABEF_2_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_2, (_MM_PERM_ENUM)221); //B2(20-23) B3(20-23) B2(20-23) B3(20-23) B6(20-23) B7(20-23) B6(20-23) B7(20-23) BA(20-23) BB(20-23) BA(20-23) BB(20-23) BE(20-23) BF(20-23) BE(20-23) BF(20-23)
 
-            const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, 221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31)
-            const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, 221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31)
+            const __m512i rhs_mat_014589CD_3_sp2 = _mm512_shuffle_epi32(rhs_mat_014589CD_3, (_MM_PERM_ENUM)221); //B0(28-31) B1(28-31) B0(28-31) B1(28-31) B4(28-31) B5(28-31) B4(28-31) B5(28-31) B8(28-31) B9(28-31) B8(28-31) B9(28-31) BC(28-31) BD(28-31) BC(28-31) BD(28-31)
+            const __m512i rhs_mat_2367ABEF_3_sp2 = _mm512_shuffle_epi32(rhs_mat_2367ABEF_3, (_MM_PERM_ENUM)221); //B2(28-31) B3(28-31) B2(28-31) B3(28-31) B6(28-31) B7(28-31) B6(28-31) B7(28-31) BA(28-31) BB(28-31) BA(28-31) BB(28-31) BE(28-31) BF(28-31) BE(28-31) BF(28-31)
 
 
             // Scale values - Load the weight scale values of two block_q4_0x8
@@ -2809,31 +2846,31 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
 
             // Shuffle pattern one - left side input
 
-            const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, 160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3)
-            const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, 160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3)
+            const __m512i lhs_mat_01_0_sp1 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)160); //A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3) A0(0-3) A0(0-3) A1(0-3) A1(0-3)
+            const __m512i lhs_mat_23_0_sp1 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)160); //A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3) A2(0-3) A2(0-3) A3(0-3) A3(0-3)
 
-            const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, 160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11)
-            const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, 160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11)
+            const __m512i lhs_mat_01_1_sp1 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)160); //A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11) A0(8-11) A0(8-11) A1(8-11) A1(8-11)
+            const __m512i lhs_mat_23_1_sp1 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)160); //A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11) A2(8-11) A2(8-11) A3(8-11) A3(8-11)
 
-            const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, 160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19)
-            const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, 160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19)
+            const __m512i lhs_mat_01_2_sp1 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)160); //A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19) A0(16-19) A0(16-19) A1(16-19) A1(16-19)
+            const __m512i lhs_mat_23_2_sp1 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)160); //A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19) A2(16-19) A2(16-19) A3(16-19) A3(16-19)
 
-            const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, 160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27)
-            const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, 160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27)
+            const __m512i lhs_mat_01_3_sp1 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)160); //A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27) A0(24-27) A0(24-27) A1(24-27) A1(24-27)
+            const __m512i lhs_mat_23_3_sp1 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)160); //A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27) A2(24-27) A2(24-27) A3(24-27) A3(24-27)
 
             // Shuffle pattern two - left side input
 
-            const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, 245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7)
-            const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, 245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7)
+            const __m512i lhs_mat_01_0_sp2 = _mm512_shuffle_epi32(lhs_mat_01_0, (_MM_PERM_ENUM)245); //A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7) A0(4-7) A0(4-7) A1(4-7) A1(4-7)
+            const __m512i lhs_mat_23_0_sp2 = _mm512_shuffle_epi32(lhs_mat_23_0, (_MM_PERM_ENUM)245); //A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7) A2(4-7) A2(4-7) A3(4-7) A3(4-7)
 
-            const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, 245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15)
-            const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, 245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15)
+            const __m512i lhs_mat_01_1_sp2 = _mm512_shuffle_epi32(lhs_mat_01_1, (_MM_PERM_ENUM)245); //A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15) A0(12-15) A0(12-15) A1(12-15) A1(12-15)
+            const __m512i lhs_mat_23_1_sp2 = _mm512_shuffle_epi32(lhs_mat_23_1, (_MM_PERM_ENUM)245); //A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15) A2(12-15) A2(12-15) A3(12-15) A3(12-15)
 
-            const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, 245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23)
-            const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, 245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23)
+            const __m512i lhs_mat_01_2_sp2 = _mm512_shuffle_epi32(lhs_mat_01_2, (_MM_PERM_ENUM)245); //A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23) A0(20-23) A0(20-23) A1(20-23) A1(20-23)
+            const __m512i lhs_mat_23_2_sp2 = _mm512_shuffle_epi32(lhs_mat_23_2, (_MM_PERM_ENUM)245); //A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23) A2(20-23) A2(20-23) A3(20-23) A3(20-23)
 
-            const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, 245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31)
-            const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, 245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31)
+            const __m512i lhs_mat_01_3_sp2 = _mm512_shuffle_epi32(lhs_mat_01_3, (_MM_PERM_ENUM)245); //A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31) A0(28-31) A0(28-31) A1(28-31) A1(28-31)
+            const __m512i lhs_mat_23_3_sp2 = _mm512_shuffle_epi32(lhs_mat_23_3, (_MM_PERM_ENUM)245); //A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31) A2(28-31) A2(28-31) A3(28-31) A3(28-31)
 
             // The values arranged in shuffle patterns are operated with dot product operation within 32 bit lane i.e corresponding bytes and multiplied and added into 32 bit integers within 32 bit lane
             // Resembles MMLAs into 2x2 matrices in ARM Version
@@ -2862,10 +2899,10 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
 
 
             // Straighten out to make 4 row vectors
-            __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, 78));
-            __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, 78), iacc_mat_01);
-            __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, 78));
-            __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, 78), iacc_mat_11);
+            __m512i iacc_row_0 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_00, _mm512_shuffle_epi32(iacc_mat_01, (_MM_PERM_ENUM)78));
+            __m512i iacc_row_1 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_00, (_MM_PERM_ENUM)78), iacc_mat_01);
+            __m512i iacc_row_2 = _mm512_mask_blend_epi32(0xCCCC, iacc_mat_10, _mm512_shuffle_epi32(iacc_mat_11, (_MM_PERM_ENUM)78));
+            __m512i iacc_row_3 = _mm512_mask_blend_epi32(0xCCCC, _mm512_shuffle_epi32(iacc_mat_10, (_MM_PERM_ENUM)78), iacc_mat_11);
 
             // Load the scale(d) values for all the 4 Q8_0 blocks and repeat it across lanes
             const __m128i row_scale_f16 = _mm_shuffle_epi32(_mm_maskload_epi32((int const*)(a_ptr[b].d), loadMask), 68);
@@ -3460,7 +3497,7 @@ void ggml_gemm_q4_0_8x8_q8_0(int n, float * restrict s, size_t bs, const void *
     }
 }
 
-void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, const void * restrict vy, int nr, int nc) {
+static void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
     const int qk = QK8_0;
     const int nb = n / qk;
     const int ncols_interleaved = 4;
@@ -3571,7 +3608,6 @@ void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * restrict s, size_t bs, const void
     }
 }
 
-// FIXME: this code is duplicated from ggml-aarch64.c
 static block_q4_0x4 make_block_q4_0x4(block_q4_0 * in, unsigned int blck_size_interleave) {
     block_q4_0x4 out;
 
@@ -3641,20 +3677,20 @@ static block_q4_0x8 make_block_q4_0x8(block_q4_0 * in, unsigned int blck_size_in
     return out;
 }
 
-static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * restrict data, size_t data_size) {
+static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) {
     GGML_ASSERT(t->type == GGML_TYPE_Q4_0);
     GGML_ASSERT(interleave_block == 4 || interleave_block == 8);
+    constexpr int nrows_interleaved = 4;
 
     block_q4_0x4 * dst = (block_q4_0x4 *)t->data;
     const block_q4_0 * src = (const block_q4_0 *)data;
     block_q4_0 dst_tmp[4];
-    int nrow = t->ne[1]; // Number of rows
-    int nrows_interleaved = 4;
+    int nrow = ggml_nrows(t);
    int nblocks = t->ne[0] / QK4_0;
 
     GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0));
 
-    if (nrow % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
+    if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
         return -1;
     }
 
@@ -3672,20 +3708,20 @@ static int repack_q4_0_to_q4_0_4_bl(struct ggml_tensor * t, int interleave_block
     GGML_UNUSED(data_size);
 }
 
-static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor *t, int interleave_block, const void * restrict data, size_t data_size) {
+static int repack_q4_0_to_q4_0_8_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) {
     GGML_ASSERT(t->type == GGML_TYPE_Q4_0);
     GGML_ASSERT(interleave_block == 8);
+    constexpr int nrows_interleaved = 8;
 
     block_q4_0x8 * dst = (block_q4_0x8*)t->data;
     const block_q4_0 * src = (const block_q4_0*) data;
     block_q4_0 dst_tmp[8];
-    int nrow = t->ne[1]; // Number of rows
-    int nrows_interleaved = 8;
+    int nrow = ggml_nrows(t);
    int nblocks = t->ne[0] / QK4_0;
 
     GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_q4_0));
 
-    if (nrow % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
+    if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
        return -1;
     }
 
@@ -3736,20 +3772,20 @@ static block_iq4_nlx4 make_block_iq4_nlx4(block_iq4_nl * in, unsigned int blck_s
     return out;
 }
 
-static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * restrict data, size_t data_size) {
+static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_block, const void * GGML_RESTRICT data, size_t data_size) {
     GGML_ASSERT(t->type == GGML_TYPE_IQ4_NL);
     GGML_ASSERT(interleave_block == 4 || interleave_block == 8);
 
     block_iq4_nlx4 * dst = (block_iq4_nlx4 *)t->data;
     const block_iq4_nl * src = (const block_iq4_nl *)data;
     block_iq4_nl dst_tmp[4];
-    int nrow = t->ne[1]; // Number of rows
+    int nrow = ggml_nrows(t);
     int nrows_interleaved = 4;
     int nblocks = t->ne[0] / QK4_0;
 
     GGML_ASSERT(data_size == nrow * nblocks * sizeof(block_iq4_nl));
 
-    if (nrow % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
+    if (t->ne[1] % nrows_interleaved != 0 || t->ne[0] % 8 != 0) {
         return -1;
     }
 
@@ -3767,57 +3803,456 @@ static int repack_iq4_nl_to_iq4_nl_4_bl(struct ggml_tensor * t, int interleave_b
     GGML_UNUSED(data_size);
 }
 
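Sketch of the row interleaving these repack helpers perform (simplified types; the real make_block_q4_0x4 additionally rearranges nibbles for blck_size_interleave == 8, which is omitted here):

    #include <cstdint>

    struct q4_0_sketch   { uint16_t d;    uint8_t qs[16]; };  // stand-in for block_q4_0
    struct q4_0x4_sketch { uint16_t d[4]; uint8_t qs[64]; };  // stand-in for block_q4_0x4

    static q4_0x4_sketch make_block_q4_0x4_sketch(const q4_0_sketch in[4], unsigned blck) {
        q4_0x4_sketch out;
        for (int r = 0; r < 4; r++) {
            out.d[r] = in[r].d;  // keep all four per-row scales
        }
        // Interleave the quant bytes in groups of blck bytes:
        // row0[0..blck-1] row1[0..blck-1] row2[...] row3[...] row0[blck..2*blck-1] ...
        for (unsigned j = 0; j < 64; j++) {
            const unsigned group = j / blck;   // output group index
            const unsigned row   = group % 4;  // source row feeding this group
            const unsigned chunk = group / 4;  // which chunk of that row
            out.qs[j] = in[row].qs[chunk * blck + j % blck];
        }
        return out;
    }

The t->ne[1] % nrows_interleaved checks above are what make the repack refuse tensors whose row count is not a multiple of the tile height, returning -1 so the caller can fall back to plain Q4_0.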
-// Prepare for optimized kernels if applicable
-void ggml_aarch64_repack_tensor(struct ggml_tensor * cur, enum ggml_type repack_type, const void * restrict data, size_t data_size) {
-    if (cur->type == repack_type) {
-        memcpy(cur->data, data, data_size);
+namespace ggml::cpu::aarch64 {
+// repack
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
+int repack(struct ggml_tensor *, const void *, size_t);
+
+// TODO: generalise.
+template <> int repack<block_q4_0, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 4, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_4_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_q4_0, 8, 8>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_q4_0_to_q4_0_8_bl(t, 8, data, data_size);
+}
+
+template <> int repack<block_iq4_nl, 4, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_iq4_nl_to_iq4_nl_4_bl(t, 4, data, data_size);
+}
+
+template <> int repack<block_iq4_nl, 8, 4>(struct ggml_tensor * t, const void * data, size_t data_size) {
+    return repack_iq4_nl_to_iq4_nl_4_bl(t, 8, data, data_size);
+}
+
+// gemv
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
+void gemv(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemv<block_q4_0, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemv<block_q4_0, 8, 8>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <>
+void gemv<block_iq4_nl, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemv_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+// gemm
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS>
+void gemm(int, float *, size_t, const void *, const void *, int, int);
+
+template <> void gemm<block_q4_0, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_4x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <> void gemm<block_q4_0, 8, 8>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_q4_0_8x8_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
+template <>
+void gemm<block_iq4_nl, 4, 4>(int n, float * s, size_t bs, const void * vx, const void * vy, int nr, int nc) {
+    ggml_gemm_iq4_nl_4x4_q8_0(n, s, bs, vx, vy, nr, nc);
+}
+
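How the templated entry points are meant to compose (an illustrative call sequence under assumed inputs, not code from the commit):

    // w is a 2D GGML_TYPE_Q4_0 tensor, w_q4_0/w_size its raw block data, and
    // y_q8_0 an activation row already quantized to Q8_0.
    static void run_repacked_mat_vec_sketch(struct ggml_tensor * w, const void * w_q4_0,
                                            size_t w_size, const void * y_q8_0,
                                            float * out, int n, int nc) {
        // one-time repack of the weights into the interleaved block_q4_0x8 layout
        if (repack<block_q4_0, 8, 8>(w, w_q4_0, w_size) != 0) {
            return;  // shape not eligible (rows/cols not multiples of 8)
        }
        // from then on, every mat-vec uses the kernel matching the same
        // <block type, interleave, columns> triple
        gemv<block_q4_0, 8, 8>(n, out, /*bs=*/nc, w->data, y_q8_0, /*nr=*/1, nc);
    }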
+class tensor_traits_base : public ggml::cpu::tensor_traits {
+  public:
+    virtual int repack(struct ggml_tensor * t, const void * data, size_t data_size) = 0;
+};
+
+template <typename BLOC_TYPE, int64_t INTER_SIZE, int64_t NB_COLS> class tensor_traits : public tensor_traits_base {
+
+    bool work_size(int /* n_threads */, const struct ggml_tensor * op, size_t & size) override {
+        // not realy a GGML_TYPE_Q8_0 but same size.
+        switch (op->op) {
+            case GGML_OP_MUL_MAT:
+                size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1]));
+                return true;
+            case GGML_OP_MUL_MAT_ID:
+                size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(op->src[1]));
+                size = GGML_PAD(size, sizeof(int64_t));  // + padding for next bloc.
+                size += sizeof(int64_t) * (1+op->src[0]->ne[2]) * op->src[1]->ne[2];
+                return true;
+            default:
+                // GGML_ABORT("fatal error");
+                break;
+        }
+        return false;
+    }
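The MUL_MAT_ID branch above budgets three regions in one buffer: the Q8_0-quantized copy of src1, the per-expert row counts, and the row mappings. The same arithmetic, spelled out as a standalone helper (illustration; mmid_row_mapping is two int32_t, i.e. int64_t-sized):

    static size_t mul_mat_id_wsize_sketch(const struct ggml_tensor * src0,   // experts
                                          const struct ggml_tensor * src1) { // activations
        // quantized copy of all of src1, in Q8_0 row layout
        size_t size = ggml_row_size(GGML_TYPE_Q8_0, ggml_nelements(src1));
        // align so the int64 bookkeeping that follows starts on an 8-byte boundary
        size = GGML_PAD(size, sizeof(int64_t));
        // the diff's combined budget for the counts array and the
        // [n_expert][ne12] mapping table (each entry int64-sized)
        size += sizeof(int64_t) * (1 + src0->ne[2]) * src1->ne[2];
        return size;
    }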
+
+    bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
+        switch (op->op) {
+            case GGML_OP_MUL_MAT:
+                forward_mul_mat(params, op);
+                return true;
+            case GGML_OP_MUL_MAT_ID:
+                forward_mul_mat_id(params, op);
+                return true;
+            default:
+                // GGML_ABORT("fatal error");
+                break;
+        }
+        return false;
+    }
+
+    void forward_mul_mat(ggml_compute_params * params, ggml_tensor * op) {
+        const ggml_tensor * src0 = op->src[0];
+        const ggml_tensor * src1 = op->src[1];
+        ggml_tensor * dst = op;
+
+        GGML_TENSOR_BINARY_OP_LOCALS
+
+        const int ith = params->ith;
+        const int nth = params->nth;
+
+        GGML_ASSERT(ne0 == ne01);
+        GGML_ASSERT(ne1 == ne11);
+        GGML_ASSERT(ne2 == ne12);
+        GGML_ASSERT(ne3 == ne13);
+
+        // dst cannot be transposed or permuted
+        GGML_ASSERT(nb0 == sizeof(float));
+        GGML_ASSERT(nb0 <= nb1);
+        GGML_ASSERT(nb1 <= nb2);
+        GGML_ASSERT(nb2 <= nb3);
+
+        GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+        GGML_ASSERT(ggml_n_dims(op->src[0]) == 2);
+        // GGML_ASSERT(ggml_n_dims(op->src[1]) == 2);
+
+        char * wdata = static_cast<char *>(params->wdata);
+        const size_t nbw1 = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+
+        assert(params->wsize >= nbw1 * ne11);
+
+        const ggml_from_float_t from_float = ggml_get_type_traits_cpu(GGML_TYPE_Q8_0)->from_float;
+
+        int64_t i11_processed = 0;
+        for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
+            quantize_mat_q8_0((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), 4, ne10,
+                              INTER_SIZE);
+        }
+        i11_processed = ne11 - ne11 % 4;
+        for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
+            from_float((float *) ((char *) src1->data + i11 * nb11), (void *) (wdata + i11 * nbw1), ne10);
+        }
+
+        ggml_barrier(params->threadpool);
+
+        const void * src1_wdata = params->wdata;
+        const size_t src1_col_stride = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+        int64_t src0_start = (ith * ne01) / nth;
+        int64_t src0_end = ((ith + 1) * ne01) / nth;
+        src0_start = (src0_start % NB_COLS) ? src0_start + NB_COLS - (src0_start % NB_COLS) : src0_start;
+        src0_end = (src0_end % NB_COLS) ? src0_end + NB_COLS - (src0_end % NB_COLS) : src0_end;
+        if (src0_start >= src0_end) {
             return;
         }
 
-    if (cur->type == GGML_TYPE_Q4_0) {
-        switch (repack_type) {
-            case GGML_TYPE_Q4_0_8_8:
-                repack_q4_0_to_q4_0_8_bl(cur, 8, data, data_size);
-                break;
-            case GGML_TYPE_Q4_0_4_8:
-                repack_q4_0_to_q4_0_4_bl(cur, 8, data, data_size);
-                break;
-            case GGML_TYPE_Q4_0_4_4:
-                repack_q4_0_to_q4_0_4_bl(cur, 4, data, data_size);
-                break;
-            default:
-                GGML_ABORT("Unsupported type");
+        // If there are more than three rows in src1, use gemm; otherwise, use gemv.
+        if (ne11 > 3) {
+            gemm<BLOC_TYPE, INTER_SIZE, NB_COLS>(ne00, (float *) ((char *) dst->data) + src0_start, ne01,
+                                                 (const char *) src0->data + src0_start * nb01,
+                                                 (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start);
         }
-    } else if (cur->type == GGML_TYPE_IQ4_NL) {
-        switch (repack_type) {
-            case GGML_TYPE_IQ4_NL_4_4:
-                repack_iq4_nl_to_iq4_nl_4_bl(cur, 4, data, data_size);
-                break;
-            default:
-                GGML_ABORT("Unsupported type");
-        }
-    } else {
-        GGML_ABORT("Unsupported type");
+        for (int iter = ne11 - ne11 % 4; iter < ne11; iter++) {
+            gemv<BLOC_TYPE, INTER_SIZE, NB_COLS>(ne00, (float *) ((char *) dst->data + (iter * nb1)) + src0_start, ne01,
+                                                 (const char *) src0->data + src0_start * nb01,
+                                                 (const char *) src1_wdata + (src1_col_stride * iter), 1,
+                                                 src0_end - src0_start);
         }
     }
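Note how forward_mul_mat splits work: each thread takes a [src0_start, src0_end) slice of the output columns, rounded up to a multiple of NB_COLS so that no interleaved column tile is shared between threads. The rounding, as a standalone helper (illustration only):

    static int64_t round_up_to_tile(int64_t v, int64_t nb_cols) {
        return (v % nb_cols) ? v + nb_cols - (v % nb_cols) : v;
    }
    // e.g. NB_COLS == 8, ne01 == 96, 5 threads: the raw splits 0|19|38|57|76|96
    // become 0|24|40|64|80|96; a thread whose rounded start reaches its rounded
    // end simply returns early (the src0_start >= src0_end check above).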
 
-enum ggml_type ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) {
+    void forward_mul_mat_id(ggml_compute_params * params, ggml_tensor * op) {
+        const ggml_tensor * src0 = op->src[0];
+        const ggml_tensor * src1 = op->src[1];
+        const ggml_tensor * ids  = op->src[2];
+        ggml_tensor * dst = op;
+
+        GGML_TENSOR_BINARY_OP_LOCALS
+
+        const int ith = params->ith;
+        const int nth = params->nth;
+
+        const ggml_from_float_t from_float = ggml_get_type_traits_cpu(GGML_TYPE_Q8_0)->from_float;
+
+        // we don't support permuted src0 or src1
+        GGML_ASSERT(nb00 == ggml_type_size(src0->type));
+        GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+        // dst cannot be transposed or permuted
+        GGML_ASSERT(nb0 == sizeof(float));
+        GGML_ASSERT(nb0 <= nb1);
+        GGML_ASSERT(nb1 <= nb2);
+        GGML_ASSERT(nb2 <= nb3);
+
+        GGML_ASSERT(ne03 == 1);
+        GGML_ASSERT(ne13 == 1);
+        GGML_ASSERT(ne3 == 1);
+
+        GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+        // row groups
+        const int n_ids = ids->ne[0]; // n_expert_used
+        const int n_as  = ne02;       // n_expert
+
+        const size_t nbw1 = ggml_row_size(GGML_TYPE_Q8_0, ne10);
+        const size_t nbw2 = nbw1*ne11;
+        const size_t nbw3 = nbw2*ne12;
+
+        struct mmid_row_mapping {
+            int32_t i1;
+            int32_t i2;
+        };
+
+        GGML_ASSERT(params->wsize >= (GGML_PAD(nbw3, sizeof(int64_t)) + n_as * sizeof(int64_t) +
+                                      n_as * ne12 * sizeof(mmid_row_mapping)));
+
+        auto wdata          = (char *) params->wdata;
+        auto wdata_src1_end = (char *) wdata + GGML_PAD(nbw3, sizeof(int64_t));
+        int64_t * matrix_row_counts = (int64_t *) (wdata_src1_end); // [n_as]
+        struct mmid_row_mapping * matrix_rows = (struct mmid_row_mapping *) (matrix_row_counts + n_as); // [n_as][ne12]
+
+        // src1: float32 => block_q8_0
+        for (int64_t i12 = 0; i12 < ne12; ++i12) {
+            for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
+                from_float((float *)((char *) src1->data + i12 * nb12 + i11 * nb11),
+                           (void *) (wdata + i12 * nbw2 + i11 * nbw1),
+                           ne10);
+            }
+        }
+
+#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id) * ne12 + (i1)]
+
+        if (ith == 0) {
+            // initialize matrix_row_counts
+            memset(matrix_row_counts, 0, n_as * sizeof(int64_t));
+
+            // group rows by src0 matrix
+            for (int32_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
+                for (int32_t id = 0; id < n_ids; ++id) {
+                    const int32_t i02 =
+                        *(const int32_t *) ((const char *) ids->data + iid1 * ids->nb[1] + id * ids->nb[0]);
+
+                    GGML_ASSERT(i02 >= 0 && i02 < n_as);
+
+                    MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = { id, iid1 };
+                    matrix_row_counts[i02] += 1;
+                }
+            }
+        }
+
+        ggml_barrier(params->threadpool);
+
|
// compute each matrix multiplication in sequence
|
||||||
|
for (int cur_a = 0; cur_a < n_as; ++cur_a) {
|
||||||
|
const int64_t cne1 = matrix_row_counts[cur_a];
|
||||||
|
|
||||||
|
if (cne1 == 0) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto src0_cur = (const char *) src0->data + cur_a*nb02;
|
||||||
|
|
||||||
|
//const int64_t nr0 = ne01; // src0 rows
|
||||||
|
const int64_t nr1 = cne1; // src1 rows
|
||||||
|
|
||||||
|
int64_t src0_cur_start = (ith * ne01) / nth;
|
||||||
|
int64_t src0_cur_end = ((ith + 1) * ne01) / nth;
|
||||||
|
src0_cur_start =
|
||||||
|
(src0_cur_start % NB_COLS) ? src0_cur_start + NB_COLS - (src0_cur_start % NB_COLS) : src0_cur_start;
|
||||||
|
src0_cur_end = (src0_cur_end % NB_COLS) ? src0_cur_end + NB_COLS - (src0_cur_end % NB_COLS) : src0_cur_end;
|
||||||
|
|
||||||
|
if (src0_cur_start >= src0_cur_end) return;
|
||||||
|
|
||||||
|
for (int ir1 = 0; ir1 < nr1; ir1++) {
|
||||||
|
struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1);
|
||||||
|
const int id = row_mapping.i1; // selected expert index
|
||||||
|
|
||||||
|
const int64_t i11 = id % ne11;
|
||||||
|
const int64_t i12 = row_mapping.i2; // row index in src1
|
||||||
|
|
||||||
|
const int64_t i1 = id; // selected expert index
|
||||||
|
const int64_t i2 = i12; // row
|
||||||
|
|
||||||
|
auto src1_col = (const char *) wdata + (i11 * nbw1 + i12 * nbw2);
|
||||||
|
|
||||||
|
gemv<BLOC_TYPE, INTER_SIZE, NB_COLS>(
|
||||||
|
ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start,
|
||||||
|
ne01, src0_cur + src0_cur_start * nb01,
|
||||||
|
src1_col, 1, src0_cur_end - src0_cur_start);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#undef MMID_MATRIX_ROW
|
||||||
|
}
|
||||||
|
|
||||||
|
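Note: the row-grouping pass above buckets every (token, expert-slot) selection by expert id, so each expert's rows can be processed as one batch. A standalone sketch of that grouping, with hypothetical toy data (not ggml code):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Same idea as MMID_MATRIX_ROW: record, per expert, which src1 rows it owns.
    struct mmid_row_mapping { int32_t i1; int32_t i2; };

    int main() {
        const int n_as = 4;                    // number of experts
        const int n_ids = 2, n_tokens = 3;     // experts used per token, token count
        const int32_t ids[n_tokens][n_ids] = { {0, 2}, {2, 3}, {0, 2} };

        int64_t counts[n_as];
        memset(counts, 0, sizeof(counts));
        mmid_row_mapping rows[n_as][n_tokens * n_ids];

        for (int32_t iid1 = 0; iid1 < n_tokens; ++iid1) {
            for (int32_t id = 0; id < n_ids; ++id) {
                const int32_t i02 = ids[iid1][id];      // expert chosen for this slot
                rows[i02][counts[i02]++] = { id, iid1 }; // (slot, token) pair
            }
        }

        for (int a = 0; a < n_as; ++a) {
            printf("expert %d: %lld row(s)\n", a, (long long) counts[a]);
        }
        return 0;
    }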
+    int repack(struct ggml_tensor * t, const void * data, size_t data_size) override {
+        GGML_LOG_DEBUG("%s: repack tensor %s with %s_%dx%d\n", __func__, t->name, ggml_type_name(t->type),
+                       (int) NB_COLS, (int) INTER_SIZE);
+        return ggml::cpu::aarch64::repack<BLOC_TYPE, INTER_SIZE, NB_COLS>(t, data, data_size);
+    }
+};
+
+// instance for Q4
+static const tensor_traits<block_q4_0, 4, 4> q4_0_4x4_q8_0;
+static const tensor_traits<block_q4_0, 8, 4> q4_0_4x8_q8_0;
+static const tensor_traits<block_q4_0, 8, 8> q4_0_8x8_q8_0;
+
+// instance for IQ4
+static const tensor_traits<block_iq4_nl, 4, 4> iq4_nl_4x4_q8_0;
+
+}  // namespace ggml::cpu::aarch64
-enum ggml_type ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) {
-    if (cur->type == GGML_TYPE_Q4_0) {
-        if (ggml_cpu_has_avx2() || (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) {
-            return GGML_TYPE_Q4_0_8_8;
-        }
-        if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
-            return GGML_TYPE_Q4_0_4_8;
-        }
-        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
-            return GGML_TYPE_Q4_0_4_4;
-        }
-    } else if (cur->type == GGML_TYPE_IQ4_NL) {
-        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
-            return GGML_TYPE_IQ4_NL_4_4;
-        }
-    }
-
-    return cur->type;
-}
+static const ggml::cpu::tensor_traits * ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur) {
+    if (cur->type == GGML_TYPE_Q4_0) {
+        // TODO: enable for AVX2 - currently disabled due to bad gemv performance
+        if (/* ggml_cpu_has_avx2() || */ (ggml_cpu_has_sve() && ggml_cpu_has_matmul_int8() && ggml_cpu_get_sve_cnt() == QK8_0)) {
+            if (cur->ne[1] % 8 == 0) {
+                return &ggml::cpu::aarch64::q4_0_8x8_q8_0;
+            }
+        }
+        if (ggml_cpu_has_neon() && ggml_cpu_has_matmul_int8()) {
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::q4_0_4x8_q8_0;
+            }
+        }
+        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::q4_0_4x4_q8_0;
+            }
+        }
+    } else if (cur->type == GGML_TYPE_IQ4_NL) {
+        if (ggml_cpu_has_neon() && ggml_cpu_has_dotprod()) {
+            if (cur->ne[1] % 4 == 0) {
+                return &ggml::cpu::aarch64::iq4_nl_4x4_q8_0;
+            }
+        }
+    }
+
+    return nullptr;
+}
+static void ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
+    tensor->extra = (void *) const_cast<ggml::cpu::tensor_traits *>(ggml_aarch64_get_optimal_repack_type(tensor));
+
+    GGML_UNUSED(buffer);
+}
+
+static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor,
+                                                       const void * data, size_t offset, size_t size) {
+    GGML_ASSERT(offset == 0);
+    GGML_ASSERT(size == ggml_nbytes(tensor));
+
+    auto tensor_traits = (ggml::cpu::aarch64::tensor_traits_base *) tensor->extra;
+    auto OK            = tensor_traits->repack(tensor, data, size);
+
+    GGML_ASSERT(OK == 0);
+    GGML_UNUSED(buffer);
+}
+
+static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU_AARCH64";
+
+    GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+    ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
+
+    if (buffer == nullptr) {
+        return nullptr;
+    }
+
+    buffer->buft              = buft;
+    buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor;
+    buffer->iface.set_tensor  = ggml_backend_cpu_aarch64_buffer_set_tensor;
+    return buffer;
+}
+
+static size_t ggml_backend_cpu_aarch64_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+    return TENSOR_ALIGNMENT;
+
+    GGML_UNUSED(buft);
+}
+namespace ggml::cpu::aarch64 {
+class extra_buffer_type : ggml::cpu::extra_buffer_type {
+    bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
+        if (   op->op == GGML_OP_MUL_MAT &&
+               op->src[0]->buffer &&
+               (ggml_n_dims(op->src[0]) == 2) &&
+               op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type() &&
+               ggml_aarch64_get_optimal_repack_type(op->src[0])
+               ) {
+            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
+                return false;
+            }
+            if (op->src[1]->type == GGML_TYPE_F32) {
+                return true;
+            }
+            //if (op->src[1]->type == GGML_TYPE_Q8_0) {
+            //    return true;
+            //}
+            // may be possible if Q8_0 packed...
+        } else if (op->op == GGML_OP_MUL_MAT_ID
+                   && op->src[0]->buffer
+                   && (ggml_n_dims(op->src[0]) == 3)
+                   && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type()
+                   && ggml_aarch64_get_optimal_repack_type(op->src[0])
+                   ) {
+            if (op->src[1]->buffer && !ggml_backend_buft_is_host(op->src[1]->buffer->buft)) {
+                return false;
+            }
+            if (op->src[1]->type == GGML_TYPE_F32) {
+                return true;
+            }
+            //if (op->src[1]->type == GGML_TYPE_Q8_0) {
+            //    return true;
+            //}
+        }
+        return false;
+    }
+
+    ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
+        if (op->op == GGML_OP_MUL_MAT || op->op == GGML_OP_MUL_MAT_ID) {
+            if (op->src[0]->buffer && op->src[0]->buffer->buft == ggml_backend_cpu_aarch64_buffer_type()) {
+                return (ggml::cpu::tensor_traits *) op->src[0]->extra;
+            }
+        }
+        return nullptr;
+    }
+};
+}  // namespace ggml::cpu::aarch64
+
+ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) {
+    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = {
+        /* .iface    = */ {
+            /* .get_name         = */ ggml_backend_cpu_aarch64_buffer_type_get_name,
+            /* .alloc_buffer     = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_cpu_aarch64_buffer_type_get_alignment,
+            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
+            /* .get_alloc_size   = */ nullptr, // defaults to ggml_nbytes
+            /* .is_host          = */ nullptr,
+        },
+        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
+        /* .context = */ new ggml::cpu::aarch64::extra_buffer_type(),
+    };
+
+    return &ggml_backend_cpu_buffer_type_aarch64;
+}
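Note: a hedged usage sketch (assumed client code, not from the patch) of how a weight placed in this buffer type gets its repack layout chosen by init_tensor() and its data repacked by set_tensor() on upload. ggml_backend_cpu_aarch64_buffer_type() is declared in the internal ggml-cpu-aarch64.h header:

    #include "ggml.h"
    #include "ggml-alloc.h"
    #include "ggml-backend.h"
    #include "ggml-cpu-aarch64.h"   // internal header, declares the buffer type

    int main(void) {
        struct ggml_init_params ip = { ggml_tensor_overhead() * 8, NULL, /*no_alloc=*/ true };
        struct ggml_context * ctx = ggml_init(ip);

        // ne[1] is a multiple of 8, so any of the 4x4 / 4x8 / 8x8 layouts is eligible
        struct ggml_tensor * w = ggml_new_tensor_2d(ctx, GGML_TYPE_Q4_0, 4096, 4096);

        // allocating from this buffer type installs the repacking callbacks
        ggml_backend_buffer_t buf =
            ggml_backend_alloc_ctx_tensors_from_buft(ctx, ggml_backend_cpu_aarch64_buffer_type());

        // ggml_backend_tensor_set(w, q4_0_data, 0, ggml_nbytes(w)); // would repack here
        GGML_UNUSED(w);

        ggml_backend_buffer_free(buf);
        ggml_free(ctx);
        return 0;
    }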
ggml/src/ggml-cpu/ggml-cpu-aarch64.h
@@ -1,32 +1,8 @@
 #pragma once
 
+#include "ggml-cpu-traits.h"
 #include "ggml.h"
 
 // GGML internal header
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Quantization
-void quantize_mat_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t nrows, int64_t n_per_row, int64_t blck_size_interleave);
-
-// GEMV
-void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-
-// GEMM
-void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc);
-
-void ggml_aarch64_repack_tensor(struct ggml_tensor * cur, enum ggml_type repack_type, const void * data, size_t data_size);
-enum ggml_type ggml_aarch64_get_optimal_repack_type(const struct ggml_tensor * cur);
-
-#ifdef __cplusplus
-}
-#endif
+ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void);
ggml/src/ggml-cpu/ggml-cpu-hbm.cpp (new file, 55 lines)
@@ -0,0 +1,55 @@
+#ifdef GGML_USE_CPU_HBM
+
+#include "ggml-backend.h"
+#include "ggml-backend-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-impl.h"
+
+#include "ggml-cpu-hbm.h"
+
+// buffer type HBM
+
+#include <hbwmalloc.h>
+
+static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+    return "CPU_HBM";
+
+    GGML_UNUSED(buft);
+}
+
+static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+    hbw_free(buffer->context);
+}
+
+static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
+                                                                           size_t size) {
+    void * ptr;
+    int    result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
+    if (result != 0) {
+        GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
+        return NULL;
+    }
+
+    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
+    buffer->buft                 = buft;
+    buffer->iface.free_buffer    = ggml_backend_cpu_hbm_buffer_free_buffer;
+
+    return buffer;
+}
+
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
+    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
+        /* .iface    = */ {
+            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
+            /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
+            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
+            /* .get_max_size     = */ nullptr, // defaults to SIZE_MAX
+            /* .get_alloc_size   = */ nullptr, // defaults to ggml_nbytes
+            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
+        },
+        /* .context = */ nullptr,
+    };
+
+    return &ggml_backend_cpu_buffer_type_hbm;
+}
+#endif
ggml/src/ggml-cpu/ggml-cpu-hbm.h (new file, 8 lines)
@@ -0,0 +1,8 @@
+#pragma once
+
+#include "ggml-backend.h"
+#include "ggml.h"
+
+// GGML CPU internal header
+
+ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void);
ggml/src/ggml-cpu/ggml-cpu-traits.cpp (new file, 36 lines)
@@ -0,0 +1,36 @@
+#include "ggml-cpu-traits.h"
+
+#include "ggml-backend-impl.h"
+#include "ggml-backend.h"
+
+namespace ggml::cpu {
+tensor_traits::~tensor_traits() {}
+
+extra_buffer_type::~extra_buffer_type() {}
+}  // namespace ggml::cpu
+
+bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) {
+    for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra && extra->context) {
+            auto buf_extra     = (ggml::cpu::extra_buffer_type *) extra->context;
+            auto tensor_traits = buf_extra->get_tensor_traits(op);
+            if (tensor_traits && tensor_traits->compute_forward(params, op)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size) {
+    for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra && extra->context) {
+            auto buf_extra     = (ggml::cpu::extra_buffer_type *) extra->context;
+            auto tensor_traits = buf_extra->get_tensor_traits(op);
+            if (tensor_traits && tensor_traits->work_size(n_threads, op, *size)) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
ggml/src/ggml-cpu/ggml-cpu-traits.h (new file, 38 lines)
@@ -0,0 +1,38 @@
+#pragma once
+#include "ggml-backend-impl.h"
+#include "ggml-cpu-impl.h"
+#include "ggml.h"
+
+#ifdef __cplusplus
+#    include <vector>
+extern "C" {
+#endif
+
+// return true if the op is handled by an extra "accelerator"
+bool ggml_cpu_extra_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op);
+bool ggml_cpu_extra_work_size(int n_threads, const struct ggml_tensor * op, size_t * size);
+
+#ifdef __cplusplus
+}
+
+namespace ggml::cpu {
+// register in tensor->extra
+class tensor_traits {
+  public:
+    virtual ~tensor_traits();
+    virtual bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) = 0;
+    virtual bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) = 0;
+};
+
+class extra_buffer_type {
+  public:
+    virtual ~extra_buffer_type();
+    virtual bool            supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) = 0;
+    virtual tensor_traits * get_tensor_traits(const struct ggml_tensor * op) = 0;
+};
+}  // namespace ggml::cpu
+
+// implemented in ggml-cpu.cpp.
+std::vector<ggml_backend_buffer_type_t> & ggml_backend_cpu_get_extra_buffers_type();
+
+#endif
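Note: these two abstract classes are the whole extension surface for a CPU "accelerator": an extra buffer type claims ops via supports_op() and hands out a tensor_traits (stored in tensor->extra) that provides the kernels. A minimal hypothetical implementation against these interfaces, only to show the shape of the API (not part of the patch):

    #include "ggml-cpu-traits.h"

    namespace ggml::cpu::demo {

    class tensor_traits : public ggml::cpu::tensor_traits {
        bool work_size(int n_threads, const struct ggml_tensor * op, size_t & size) override {
            size = 0;       // no extra scratch needed; returning true skips the default sizing
            return true;
            GGML_UNUSED(n_threads); GGML_UNUSED(op);
        }
        bool compute_forward(struct ggml_compute_params * params, struct ggml_tensor * op) override {
            // returning false falls back to the regular CPU implementation
            return false;
            GGML_UNUSED(params); GGML_UNUSED(op);
        }
    };

    class extra_buffer_type : public ggml::cpu::extra_buffer_type {
        bool supports_op(ggml_backend_dev_t, const struct ggml_tensor * op) override {
            return op->op == GGML_OP_MUL_MAT;  // claim plain mat-muls only
        }
        ggml::cpu::tensor_traits * get_tensor_traits(const struct ggml_tensor * op) override {
            static tensor_traits traits;
            return op->op == GGML_OP_MUL_MAT ? &traits : nullptr;
        }
    };

    }  // namespace ggml::cpu::demo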
ggml/src/ggml-cpu/ggml-cpu.c
@@ -3,7 +3,7 @@
 
 #include "ggml-backend-impl.h"
 #include "ggml-backend.h"
-#include "ggml-cpu-aarch64.h"
+#include "ggml-cpu-traits.h"
 #include "ggml-cpu-impl.h"
 #include "ggml-cpu.h"
 #include "ggml-impl.h"
@@ -224,10 +224,6 @@ typedef void * thread_ret_t;
 
 typedef pthread_t ggml_thread_t;
 
-#ifdef GGML_USE_CPU_HBM
-#include <hbwmalloc.h>
-#endif
-
 #if defined(__APPLE__)
 #include <unistd.h>
 #include <mach/mach.h>
@@ -301,7 +297,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
     },
     [GGML_TYPE_Q8_0] = {
         .from_float        = quantize_row_q8_0,
-        .from_float_to_mat = quantize_mat_q8_0,
         .vec_dot           = ggml_vec_dot_q8_0_q8_0,
        .vec_dot_type      = GGML_TYPE_Q8_0,
 #if defined (__ARM_FEATURE_MATMUL_INT8)
@@ -409,33 +404,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
         .vec_dot_type = GGML_TYPE_BF16,
         .nrows        = 1,
     },
-    [GGML_TYPE_Q4_0_4_4] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 4,
-        .gemv         = ggml_gemv_q4_0_4x4_q8_0,
-        .gemm         = ggml_gemm_q4_0_4x4_q8_0,
-    },
-    [GGML_TYPE_Q4_0_4_8] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 4,
-        .gemv         = ggml_gemv_q4_0_4x8_q8_0,
-        .gemm         = ggml_gemm_q4_0_4x8_q8_0,
-    },
-    [GGML_TYPE_Q4_0_8_8] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 8,
-        .gemv         = ggml_gemv_q4_0_8x8_q8_0,
-        .gemm         = ggml_gemm_q4_0_8x8_q8_0,
-    },
     [GGML_TYPE_TQ1_0] = {
         .from_float   = quantize_row_tq1_0,
         .vec_dot      = ggml_vec_dot_tq1_0_q8_K,
@@ -448,15 +416,6 @@ static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
         .vec_dot_type = GGML_TYPE_Q8_K,
         .nrows        = 1,
     },
-    [GGML_TYPE_IQ4_NL_4_4] = {
-        .from_float   = NULL,
-        .vec_dot      = NULL,
-        .vec_dot_type = GGML_TYPE_Q8_0,
-        .nrows        = 1,
-        .ncols        = 4,
-        .gemv         = ggml_gemv_iq4_nl_4x4_q8_0,
-        .gemm         = ggml_gemm_iq4_nl_4x4_q8_0,
-    },
 };
 
 const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) {
@@ -4509,9 +4468,6 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_add_q_f32(params, dst);
             } break;
@@ -4889,9 +4845,6 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_add1_q_f32(params, dst);
             } break;
@@ -5019,9 +4972,6 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
         default:
             {
                 GGML_ABORT("fatal error");
@@ -7437,27 +7387,9 @@ static void ggml_compute_forward_mul_mat(
     const int ith = params->ith;
     const int nth = params->nth;
 
-    enum ggml_type type = src0->type;
-
-    if (src0->buffer && ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) {
-        type = (enum ggml_type)(intptr_t)src0->extra;
-    }
-
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-    if (src0->buffer && ggml_backend_amx_buft_is_amx(src0->buffer->buft)) {
-        ggml_backend_amx_mul_mat(params, dst);
-        return;
-    }
-#endif
-
-    enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
+    enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
     ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
-    ggml_from_float_to_mat_t const from_float_to_mat = type_traits_cpu[vec_dot_type].from_float_to_mat;
-    int64_t const vec_dot_num_rows = type_traits_cpu[type].nrows;
-    int64_t const matmul_num_cols = type_traits_cpu[type].ncols;
-    int64_t const blck_size_interleave = ggml_get_type_traits(type)->blck_size_interleave;
-    ggml_gemv_t const gemv = type_traits_cpu[type].gemv;
-    ggml_gemm_t const gemm = type_traits_cpu[type].gemm;
+    int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows;
 
     GGML_ASSERT(ne0 == ne01);
     GGML_ASSERT(ne1 == ne11);
@@ -7465,7 +7397,7 @@ static void ggml_compute_forward_mul_mat(
     GGML_ASSERT(ne3 == ne13);
 
     // we don't support permuted src0 or src1
-    GGML_ASSERT(nb00 == ggml_type_size(type));
+    GGML_ASSERT(nb00 == ggml_type_size(src0->type));
     GGML_ASSERT(nb10 == ggml_type_size(src1->type));
 
     // dst cannot be transposed or permuted
@@ -7477,6 +7409,7 @@ static void ggml_compute_forward_mul_mat(
     // nb01 >= nb00 - src0 is not transposed
     //   compute by src0 rows
 
+    // TODO: extract to "extra_op"
 #if GGML_USE_LLAMAFILE
     // broadcast factors
     const int64_t r2 = ne12 / ne02;
@@ -7487,15 +7420,15 @@ static void ggml_compute_forward_mul_mat(
     if (src1_cont) {
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
-                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(type),
+                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                      (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
-                                     nb01/ggml_type_size(type),
+                                     nb01/ggml_type_size(src0->type),
                                      (const char *)src1->data + i12*nb12 + i13*nb13,
                                      nb11/ggml_type_size(src1->type),
                                      (char *)dst->data + i12*nb2 + i13*nb3,
                                      nb1/ggml_type_size(dst->type),
                                      ith, nth,
-                                     type,
+                                     src0->type,
                                      src1->type,
                                      dst->type))
                     goto UseGgmlGemm1;
@@ -7516,16 +7449,7 @@ UseGgmlGemm1:;
 
         for (int64_t i13 = 0; i13 < ne13; ++i13) {
             for (int64_t i12 = 0; i12 < ne12; ++i12) {
-                int64_t i11_processed = 0;
-                if ((ggml_n_dims(src1) == 2) && from_float_to_mat && gemm) {
-                    for (int64_t i11 = ith * 4; i11 < ne11 - ne11 % 4; i11 += nth * 4) {
-                        from_float_to_mat((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
-                                          (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
-                                          4, ne10, blck_size_interleave);
-                    }
-                    i11_processed = ne11 - ne11 % 4;
-                }
-                for (int64_t i11 = i11_processed + ith; i11 < ne11; i11 += nth) {
+                for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
                     from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
                            (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
                            ne10);
@@ -7548,15 +7472,15 @@ UseGgmlGemm1:;
 
         for (int64_t i13 = 0; i13 < ne13; i13++)
             for (int64_t i12 = 0; i12 < ne12; i12++)
-                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(type),
+                if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type),
                                      (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
-                                     nb01/ggml_type_size(type),
+                                     nb01/ggml_type_size(src0->type),
                                      (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
                                      row_size/ggml_type_size(vec_dot_type),
                                      (char *)dst->data + i12*nb2 + i13*nb3,
                                      nb1/ggml_type_size(dst->type),
                                      ith, nth,
-                                     type,
+                                     src0->type,
                                      vec_dot_type,
                                      dst->type))
                     goto UseGgmlGemm2;
@@ -7598,28 +7522,6 @@ UseGgmlGemm2:;
     const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
     const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
 
-    if ((ggml_n_dims(src0) == 2) && gemv) {
-        const void * src1_wdata      = (src1->type == vec_dot_type) ? src1->data : params->wdata;
-        const size_t src1_col_stride = ggml_is_contiguous(src1) || src1->type != vec_dot_type ? ggml_row_size(vec_dot_type, ne10) : nb11;
-        int64_t src0_start = (ith * ne01) / nth;
-        int64_t src0_end   = ((ith + 1) * ne01) / nth;
-        src0_start = (src0_start % matmul_num_cols) ? src0_start + matmul_num_cols - (src0_start % matmul_num_cols): src0_start;
-        src0_end   = (src0_end % matmul_num_cols) ? src0_end + matmul_num_cols - (src0_end % matmul_num_cols): src0_end;
-        if (src0_start >= src0_end) return;
-
-        // If there are more than three rows in src1, use gemm; otherwise, use gemv.
-        if (gemm && (ne11 > 3)) {
-            gemm(ne00, (float *)((char *) dst->data) + src0_start, ne01, (const char *) src0->data + src0_start * nb01,
-                 (const char *) src1_wdata, ne11 - ne11 % 4, src0_end - src0_start);
-        }
-        for (int iter = gemm ? ne11 - ne11 % 4 : 0; iter < ne11; iter++) {
-            gemv(ne00, (float *)((char *) dst->data + (iter * nb1)) + src0_start, ne01,
-                 (const char *) src0->data + src0_start * nb01, (const char *) src1_wdata + (src1_col_stride * iter), 1,
-                 src0_end - src0_start);
-        }
-        return;
-    }
-
     // The first chunk comes from our thread_id, the rest will get auto-assigned.
     int current_chunk = ith;
@@ -7642,7 +7544,7 @@ UseGgmlGemm2:;
             num_rows_per_vec_dot = 1;
         }
 
-        ggml_compute_forward_mul_mat_one_chunk(params, dst, type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
+        ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
 
         if (nth >= nchunk0 * nchunk1) {
             break;
@@ -7674,8 +7576,6 @@ static void ggml_compute_forward_mul_mat_id(
     ggml_vec_dot_t    const vec_dot      = type_traits_cpu[type].vec_dot;
     enum ggml_type    const vec_dot_type = type_traits_cpu[type].vec_dot_type;
     ggml_from_float_t const from_float   = type_traits_cpu[vec_dot_type].from_float;
-    int64_t const matmul_num_cols        = type_traits_cpu[type].ncols;
-    ggml_gemv_t const gemv               = type_traits_cpu[type].gemv;
 
     // we don't support permuted src0 or src1
     GGML_ASSERT(nb00 == ggml_type_size(type));
@@ -7761,34 +7661,6 @@ static void ggml_compute_forward_mul_mat_id(
         const int64_t nr0 = ne01; // src0 rows
         const int64_t nr1 = cne1; // src1 rows
 
-        if (((ggml_n_dims(src0) - 1) == 2) && gemv) {
-            int64_t src0_cur_start = (ith * ne01) / nth;
-            int64_t src0_cur_end   = ((ith + 1) * ne01) / nth;
-            src0_cur_start = (src0_cur_start % matmul_num_cols) ? src0_cur_start + matmul_num_cols - (src0_cur_start % matmul_num_cols): src0_cur_start;
-            src0_cur_end   = (src0_cur_end % matmul_num_cols) ? src0_cur_end + matmul_num_cols - (src0_cur_end % matmul_num_cols): src0_cur_end;
-            if (src0_cur_start >= src0_cur_end) return;
-
-            for (int ir1 = 0; ir1 < nr1; ir1++) {
-                struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, ir1);
-                const int id = row_mapping.i1; // selected expert index
-
-                const int64_t i11 = id % ne11;
-                const int64_t i12 = row_mapping.i2; // row index in src1
-
-                const int64_t i1 = id;  // selected expert index
-                const int64_t i2 = i12; // row
-
-                const char * src1_col = (const char *) wdata +
-                    (src1_cont || src1->type != vec_dot_type
-                     ? (i11 + i12 * ne11) * row_size
-                     : (i11 * nb11 + i12 * nb12));
-
-                gemv(ne00, (float *)((char *) dst->data + (i1 * nb1 + i2 * nb2)) + src0_cur_start, ne01,
-                     (const char *) src0_cur + src0_cur_start * nb01, src1_col, 1, src0_cur_end - src0_cur_start);
-            }
-            continue;
-        }
-
         // distribute the thread work across the inner or outer loop based on which one is larger
 
         const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
@@ -8096,9 +7968,6 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_out_prod_q_f32(params, dst);
             } break;
@@ -8361,9 +8230,6 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
         default:
             {
                 GGML_ABORT("fatal error");
@@ -8625,9 +8491,6 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
             {
                 ggml_compute_forward_get_rows_q(params, dst);
             } break;
@@ -9217,10 +9080,6 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ3_S:
         case GGML_TYPE_IQ2_S:
         case GGML_TYPE_Q8_K:
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-        case GGML_TYPE_Q4_0_8_8:
-        case GGML_TYPE_IQ4_NL_4_4:
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
         case GGML_TYPE_I32:
@@ -12426,6 +12285,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
         return;
     }
 
+    // extra_buffer op?
+    if (ggml_cpu_extra_compute_forward(params, tensor)) return;
+
     switch (tensor->op) {
         case GGML_OP_DUP:
             {
@@ -13373,6 +13235,8 @@ struct ggml_cplan ggml_graph_plan(
 
         size_t cur = 0;
 
+        if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) {
+
         switch (node->op) {
             case GGML_OP_CPY:
             case GGML_OP_DUP:
@@ -13403,16 +13267,10 @@ struct ggml_cplan ggml_graph_plan(
                 } break;
             case GGML_OP_MUL_MAT:
                 {
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-                    if (node->src[0]->buffer && ggml_backend_amx_buft_is_amx(node->src[0]->buffer->buft)) {
-                        cur = ggml_backend_amx_desired_wsize(node);
-                    }
-#endif
                     const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
 
                     if (node->src[1]->type != vec_dot_type) {
-                        size_t cur2 = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
-                        cur = MAX(cur, cur2);
+                        cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
                     }
                 } break;
             case GGML_OP_MUL_MAT_ID:
@@ -13449,7 +13307,6 @@ struct ggml_cplan ggml_graph_plan(
                 const int64_t ne00 = node->src[0]->ne[0]; // K
                 const int64_t ne01 = node->src[0]->ne[1]; // Cout
                 const int64_t ne02 = node->src[0]->ne[2]; // Cin
-
                 const int64_t ne10 = node->src[1]->ne[0]; // L
                 const int64_t ne11 = node->src[1]->ne[1]; // Cin
@@ -13514,6 +13371,7 @@ struct ggml_cplan ggml_graph_plan(
             default:
                 break;
         }
+        }
 
         work_size = MAX(work_size, cur);
     }
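Note: both hooks follow the same "first refusal" pattern: the extra-buffer extension may claim the op (and its scratch sizing), and the built-in path runs only if it declines. A standalone illustration of that control flow with hypothetical names (not ggml code):

    #include <cstdio>

    // Stand-in for ggml_cpu_extra_work_size(): claim op 1, decline everything else.
    static bool extra_work_size(int op, size_t * size) {
        if (op == 1) { *size = 4096; return true; }  // an "accelerated" op
        return false;                                // decline: use default sizing
    }

    int main() {
        for (int op = 0; op < 2; op++) {
            size_t cur = 0;
            if (!extra_work_size(op, &cur)) {
                cur = 256;  // stand-in for the default per-op switch
            }
            printf("op %d -> work size %zu\n", op, cur);
        }
        return 0;
    }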
ggml/src/ggml-cpu/ggml-cpu.cpp
@@ -2,12 +2,18 @@
 #include "ggml-backend-impl.h"
 #include "ggml-cpu.h"
 #include "ggml-cpu-aarch64.h"
+#include "ggml-cpu-traits.h"
 #include "ggml-impl.h"
 #include "amx/amx.h"
+
 #include <cctype>
 #include <string>
 #include <vector>
 
+#ifdef GGML_USE_CPU_HBM
+#include "ggml-cpu-hbm.h"
+#endif
+
 #if defined(__APPLE__)
 #include <sys/types.h>
 #include <sys/sysctl.h>
@@ -23,115 +29,7 @@
 
 // ggml-backend interface
 
-#ifdef GGML_USE_CPU_HBM
-
-// buffer type HBM
-
-#include <hbwmalloc.h>
-
-static const char * ggml_backend_cpu_hbm_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "CPU_HBM";
-
-    GGML_UNUSED(buft);
-}
-
-static void ggml_backend_cpu_hbm_buffer_free_buffer(ggml_backend_buffer_t buffer) {
-    hbw_free(buffer->context);
-}
-
-static ggml_backend_buffer_t ggml_backend_cpu_hbm_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    void * ptr;
-    int result = hbw_posix_memalign(&ptr, ggml_backend_cpu_buffer_type_get_alignment(buft), size);
-    if (result != 0) {
-        GGML_LOG_ERROR("failed to allocate HBM buffer of size %zu\n", size);
-        return NULL;
-    }
-
-    ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
-    buffer->buft = buft;
-    buffer->iface.free_buffer = ggml_backend_cpu_hbm_buffer_free_buffer;
-
-    return buffer;
-}
-
-ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_hbm = {
-        /* .iface    = */ {
-            /* .get_name         = */ ggml_backend_cpu_hbm_buffer_type_get_name,
-            /* .alloc_buffer     = */ ggml_backend_cpu_hbm_buffer_type_alloc_buffer,
-            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
-            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
-            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
-            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
-        },
-        /* .context = */ NULL,
-    };
-
-    return &ggml_backend_cpu_buffer_type_hbm;
-}
-#endif
-
-// buffer type AARCH64
-
-static void ggml_backend_cpu_aarch64_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
-    tensor->extra = (void *)ggml_aarch64_get_optimal_repack_type(tensor); // NOLINT
-
-    GGML_UNUSED(buffer);
-}
-
-static void ggml_backend_cpu_aarch64_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
-    GGML_ASSERT(offset == 0);
-    GGML_ASSERT(size == ggml_nbytes(tensor));
-
-    enum ggml_type repack_type = (enum ggml_type)(intptr_t)tensor->extra;
-
-    ggml_aarch64_repack_tensor(tensor, repack_type, data, size);
-
-    GGML_UNUSED(buffer);
-}
-
-static const char * ggml_backend_cpu_aarch64_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
-    return "CPU_AARCH64";
-
-    GGML_UNUSED(buft);
-}
-
-static ggml_backend_buffer_t ggml_backend_cpu_aarch64_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
-    auto * buffer = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
-
-    if (buffer == NULL) {
-        return NULL;
-    }
-
-    buffer->buft = buft;
-    buffer->iface.init_tensor = ggml_backend_cpu_aarch64_buffer_init_tensor;
-    buffer->iface.set_tensor = ggml_backend_cpu_aarch64_buffer_set_tensor;
-
-    return buffer;
-}
-
-ggml_backend_buffer_type_t ggml_backend_cpu_aarch64_buffer_type(void) {
-    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type_aarch64 = {
-        /* .iface    = */ {
-            /* .get_name         = */ ggml_backend_cpu_aarch64_buffer_type_get_name,
-            /* .alloc_buffer     = */ ggml_backend_cpu_aarch64_buffer_type_alloc_buffer,
-            /* .get_alignment    = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
-            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
-            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
-            /* .is_host          = */ NULL,
-        },
-        /* .device  = */ ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
-        /* .context = */ NULL,
-    };
-
-    return &ggml_backend_cpu_buffer_type_aarch64;
-}
-
-bool ggml_backend_cpu_buft_is_aarch64(ggml_backend_buffer_type_t buft) {
-    return buft == ggml_backend_cpu_aarch64_buffer_type();
-}
-
-static ggml_backend_buffer_type_t * ggml_backend_cpu_get_extra_bufts(ggml_backend_dev_t device) {
+std::vector<ggml_backend_buffer_type_t>& ggml_backend_cpu_get_extra_buffers_type() {
     static std::vector<ggml_backend_buffer_type_t> bufts = []() {
         std::vector<ggml_backend_buffer_type_t> bufts;
 
@@ -152,11 +50,22 @@ static ggml_backend_buffer_type_t * ggml_backend_cpu_get_extra_bufts(ggml_backen
         return bufts;
     }();
 
-    return bufts.data();
+    return bufts;
+}
+
+static ggml_backend_buffer_type_t * ggml_backend_cpu_device_get_extra_buffers_type(ggml_backend_dev_t device) {
+    return ggml_backend_cpu_get_extra_buffers_type().data();
 
     GGML_UNUSED(device);
 }
+
+static bool ggml_backend_cpu_is_extra_buffer_type(ggml_backend_buffer_type_t buft) {
+    for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra && extra == buft) return true;
+    }
+    return false;
+}
 
 // CPU backend - backend (stream)
 
 struct ggml_backend_cpu_context {
@@ -465,25 +374,19 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
         return true;
     }
 
-    if (src0 && src0->buffer && ggml_backend_cpu_buft_is_aarch64(src0->buffer->buft)) {
-        if (op->op != GGML_OP_MUL_MAT || src0->type == ggml_aarch64_get_optimal_repack_type(src0)) {
-            return false;
-        }
-    }
-
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-    if (src0 && src0->buffer && ggml_backend_amx_buft_is_amx(src0->buffer->buft)) {
-        return ggml_backend_amx_device_supports_op(op);
-    }
-    for (int i = 1; i < GGML_MAX_SRC; i++) {
-        if (op->src[i] && op->src[i]->buffer && ggml_backend_amx_buft_is_amx(op->src[i]->buffer->buft)) {
-            return false;
-        }
-    }
-#endif
-
-    for (int i = 1; i < GGML_MAX_SRC; i++) {
-        if (op->src[i] && op->src[i]->buffer && ggml_backend_cpu_buft_is_aarch64(op->src[i]->buffer->buft)) {
+    // extra_buffer_op?
+    for (auto extra : ggml_backend_cpu_get_extra_buffers_type()) {
+        if (extra) {
+            auto buf_extra = (ggml::cpu::extra_buffer_type*) extra->context;
+            if (buf_extra && buf_extra->supports_op(dev, op)) {
+                return true;
+            }
+        }
+    }
+
+    // all other cases need a host buffer
+    for (int i = 0; i < GGML_MAX_SRC; i++) {
+        if (op->src[i] && op->src[i]->buffer && !ggml_backend_buft_is_host(op->src[i]->buffer->buft)) {
             return false;
         }
     }
@@ -506,19 +409,10 @@ static bool ggml_backend_cpu_device_supports_op(ggml_backend_dev_t dev, const st
         default:
             return true;
     }
-
-    GGML_UNUSED(dev);
 }
 
 static bool ggml_backend_cpu_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
-    bool supported = ggml_backend_buft_is_host(buft) || ggml_backend_cpu_buft_is_aarch64(buft);
-
-#if defined(__AMX_INT8__) && defined(__AVX512VNNI__)
-    supported = supported || ggml_backend_amx_buft_is_amx(buft);
-#endif
-
-    return supported;
+    return ggml_backend_buft_is_host(buft) || ggml_backend_cpu_is_extra_buffer_type(buft);
 
     GGML_UNUSED(dev);
 }
@@ -666,10 +560,12 @@ static ggml_backend_feature * ggml_backend_cpu_get_features(ggml_backend_reg_t r
 
 static void * ggml_backend_cpu_get_proc_address(ggml_backend_reg_t reg, const char * name) {
     if (strcmp(name, "ggml_backend_set_n_threads") == 0) {
-        return (void *)ggml_backend_cpu_set_n_threads;
+        ggml_backend_set_n_threads_t fct = ggml_backend_cpu_set_n_threads;
+        return (void *)fct;
     }
     if (strcmp(name, "ggml_backend_dev_get_extra_bufts") == 0) {
-        return (void *)ggml_backend_cpu_get_extra_bufts;
+        ggml_backend_dev_get_extra_bufts_t fct = ggml_backend_cpu_device_get_extra_buffers_type;
+        return (void *)fct;
     }
     if (strcmp(name, "ggml_backend_get_features") == 0) {
        return (void *)ggml_backend_cpu_get_features;
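Note: a hedged sketch (assumed client code, not from the patch) of how the extra buffer types are discovered through the generic proc-address mechanism, the way downstream loaders use it; it assumes the returned array is NULL-terminated, as the CPU backend builds it:

    #include "ggml-backend.h"
    #include <cstdio>

    int main() {
        ggml_backend_reg_t reg = ggml_backend_reg_by_name("CPU");
        if (!reg) return 1;

        auto get_extra_bufts = (ggml_backend_dev_get_extra_bufts_t)
            ggml_backend_reg_get_proc_address(reg, "ggml_backend_dev_get_extra_bufts");
        if (get_extra_bufts) {
            ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, 0);
            // walk the NULL-terminated list of extra buffer types (aarch64, HBM, AMX, ...)
            for (ggml_backend_buffer_type_t * it = get_extra_bufts(dev); it && *it; ++it) {
                printf("extra buffer type: %s\n", ggml_backend_buft_name(*it));
            }
        }
        return 0;
    }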
ggml/src/ggml-cuda/ggml-cuda.cu
@@ -3210,7 +3210,7 @@ static void * ggml_backend_cuda_reg_get_proc_address(ggml_backend_reg_t reg, con
 static const ggml_backend_reg_i ggml_backend_cuda_reg_interface = {
     /* .get_name         = */ ggml_backend_cuda_reg_get_name,
     /* .get_device_count = */ ggml_backend_cuda_reg_get_device_count,
-    /* .get_device_get   = */ ggml_backend_cuda_reg_get_device,
+    /* .get_device       = */ ggml_backend_cuda_reg_get_device,
     /* .get_proc_address = */ ggml_backend_cuda_reg_get_proc_address,
 };
ggml/src/ggml-quants.c
@@ -5220,15 +5220,6 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
             {
                 VALIDATE_ROW_DATA_D_F16_IMPL(block_iq4_nl, data, nb);
             } break;
-        case GGML_TYPE_Q4_0_4_4:
-        case GGML_TYPE_Q4_0_4_8:
-            {
-                VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x4, data, nbytes / sizeof(block_q4_0x4), 4);
-            } break;
-        case GGML_TYPE_Q4_0_8_8:
-            {
-                VALIDATE_ROW_DATA_DVEC_F16_IMPL(block_q4_0x8, data, nbytes / sizeof(block_q4_0x8), 8);
-            } break;
-
         case GGML_TYPE_I8:
         case GGML_TYPE_I16:
ggml/src/ggml-sycl/ggml-sycl.cpp
@@ -4630,7 +4630,7 @@ static void *ggml_backend_sycl_reg_get_proc_address(ggml_backend_reg_t reg, cons
 static const ggml_backend_reg_i ggml_backend_sycl_reg_interface = {
     /* .get_name         = */ ggml_backend_sycl_reg_get_name,
     /* .get_device_count = */ ggml_backend_sycl_reg_get_device_count,
-    /* .get_device_get   = */ ggml_backend_sycl_reg_get_device,
+    /* .get_device       = */ ggml_backend_sycl_reg_get_device,
     /* .get_proc_address = */ ggml_backend_sycl_reg_get_proc_address,
 };
ggml/src/ggml.c
@@ -8,7 +8,10 @@
 
 // FIXME: required here for quantization functions
 #include "ggml-quants.h"
-#include "ggml-aarch64.h"
+
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 
 #if defined(_MSC_VER) || defined(__MINGW32__)
 #include <malloc.h> // using malloc.h with MSC/MINGW
@@ -788,32 +791,23 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = {
         .to_float                 = (ggml_to_float_t) ggml_bf16_to_fp32_row,
         .from_float_ref           = (ggml_from_float_t) ggml_fp32_to_bf16_row_ref,
     },
-    [GGML_TYPE_Q4_0_4_4] = {
-        .type_name                = "q4_0_4x4",
-        .blck_size                = QK4_0,
-        .blck_size_interleave     = 4,
-        .type_size                = sizeof(block_q4_0),
-        .is_quantized             = true,
-        .to_float                 = NULL,
-        .from_float_ref           = NULL,
+    [31] = { // GGML_TYPE_Q4_0_4_4
+        .type_name                = "TYPE_Q4_0_4_4 REMOVED, use Q4_0 with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
     },
-    [GGML_TYPE_Q4_0_4_8] = {
-        .type_name                = "q4_0_4x8",
-        .blck_size                = QK4_0,
-        .blck_size_interleave     = 8,
-        .type_size                = sizeof(block_q4_0),
-        .is_quantized             = true,
-        .to_float                 = NULL,
-        .from_float_ref           = NULL,
+    [32] = { // GGML_TYPE_Q4_0_4_8
+        .type_name                = "TYPE_Q4_0_4_8 REMOVED, use Q4_0 with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
     },
-    [GGML_TYPE_Q4_0_8_8] = {
-        .type_name                = "q4_0_8x8",
-        .blck_size                = QK4_0,
-        .blck_size_interleave     = 8,
-        .type_size                = sizeof(block_q4_0),
-        .is_quantized             = true,
-        .to_float                 = NULL,
-        .from_float_ref           = NULL,
+    [33] = { // GGML_TYPE_Q4_0_8_8
+        .type_name                = "TYPE_Q4_0_8_8 REMOVED, use Q4_0 with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
     },
     [GGML_TYPE_TQ1_0] = {
         .type_name                = "tq1_0",
@@ -831,14 +825,23 @@ static const struct ggml_type_traits type_traits[GGML_TYPE_COUNT] = {
         .to_float                 = (ggml_to_float_t) dequantize_row_tq2_0,
         .from_float_ref           = (ggml_from_float_t) quantize_row_tq2_0_ref,
     },
-    [GGML_TYPE_IQ4_NL_4_4] = {
-        .type_name                = "iq4_nl_4x4",
-        .blck_size                = QK4_NL,
-        .blck_size_interleave     = 4,
-        .type_size                = sizeof(block_iq4_nl),
-        .is_quantized             = true,
-        .to_float                 = NULL,
-        .from_float_ref           = NULL,
+    [36] = { // GGML_TYPE_IQ4_NL_4_4
+        .type_name                = "TYPE_IQ4_NL_4_4 REMOVED, use IQ4_NL with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
+    },
+    [37] = { // GGML_TYPE_IQ4_NL_4_8
+        .type_name                = "TYPE_IQ4_NL_4_8 REMOVED, use IQ4_NL with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
+    },
+    [38] = { // GGML_TYPE_IQ4_NL_8_8
+        .type_name                = "TYPE_IQ4_NL_8_8 REMOVED, use IQ4_NL with runtime repacking",
+        .blck_size                = 0,
+        .type_size                = 0,
+        .is_quantized             = false,
     },
 };
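Note that the retired variants keep their numeric slots in the table ([31]-[33] for the Q4_0 repacks, [36]-[38] for the IQ4_NL ones), so later entries such as GGML_TYPE_TQ1_0 (34) and GGML_TYPE_TQ2_0 (35) keep their values, and their blck_size of 0 is what lets the gguf loader further below recognize a retired type in an existing file.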
@@ -1270,9 +1273,6 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_IQ4_XS:        wtype = GGML_TYPE_IQ4_XS;  break;
         case GGML_FTYPE_MOSTLY_IQ3_S:         wtype = GGML_TYPE_IQ3_S;   break;
         case GGML_FTYPE_MOSTLY_IQ2_S:         wtype = GGML_TYPE_IQ2_S;   break;
-        case GGML_FTYPE_MOSTLY_Q4_0_4_4:      wtype = GGML_TYPE_Q4_0_4_4; break;
-        case GGML_FTYPE_MOSTLY_Q4_0_4_8:      wtype = GGML_TYPE_Q4_0_4_8; break;
-        case GGML_FTYPE_MOSTLY_Q4_0_8_8:      wtype = GGML_TYPE_Q4_0_8_8; break;
         case GGML_FTYPE_UNKNOWN:              wtype = GGML_TYPE_COUNT; break;
         case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
     }
@@ -6304,9 +6304,6 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_IQ1_M:    result = quantize_iq1_m   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_NL:   result = quantize_iq4_nl  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_XS:   result = quantize_iq4_xs  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
-        case GGML_TYPE_Q4_0_4_4: result = quantize_q4_0_4x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
-        case GGML_TYPE_Q4_0_4_8: result = quantize_q4_0_4x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
-        case GGML_TYPE_Q4_0_8_8: result = quantize_q4_0_8x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_F16:
             {
                 size_t elemsize = sizeof(ggml_fp16_t);
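With the N_M cases gone, quantization always emits the plain row formats; repacking into the interleaved layouts now happens only at load time in the CPU backend. A sketch of quantizing f32 rows to plain Q4_0 through this entry point, assuming the public ggml.h API (ggml_quantize_chunk, ggml_row_size); quantize_rows_q4_0 is an illustrative helper, not part of the commit:

    #include "ggml.h"
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // quantize an nrows x n_per_row f32 matrix to plain Q4_0
    // (n_per_row must be a multiple of the Q4_0 block size, 32)
    static std::vector<uint8_t> quantize_rows_q4_0(const float * src, int64_t nrows, int64_t n_per_row) {
        const size_t row_size = ggml_row_size(GGML_TYPE_Q4_0, n_per_row);
        std::vector<uint8_t> dst(row_size * nrows);
        // start = 0 -> quantize all rows in one call; no importance matrix
        ggml_quantize_chunk(GGML_TYPE_Q4_0, src, dst.data(), 0, nrows, n_per_row, nullptr);
        return dst;
    }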
@@ -6838,7 +6835,16 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
                 (int64_t) info->ne[2] *
                 (int64_t) info->ne[3];
 
-            if (ggml_blck_size(info->type) == 0 || ne % ggml_blck_size(info->type) != 0) {
+            if (ggml_blck_size(info->type) == 0 ) {
+                // this tensor type support have been removed:
+                fprintf(stderr, "%s: tensor '%s' of type %d: %s\n",
+                        __func__, info->name.data, (int) info->type, ggml_type_name(info->type));
+                fclose(file);
+                gguf_free(ctx);
+                return NULL;
+            }
+
+            if (ne % ggml_blck_size(info->type) != 0) {
                 fprintf(stderr, "%s: tensor '%s' of type %d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n",
                         __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type));
                 fclose(file);
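Splitting the old combined condition in two gives retired types their own error message instead of a misleading "not a multiple of block size" report. A sketch of how the guard behaves for a legacy type id, assuming only the public ggml_blck_size/ggml_type_name accessors (type id 31 is the retired Q4_0_4_4 slot):

    #include "ggml.h"
    #include <cstdio>

    int main() {
        const enum ggml_type t = (enum ggml_type) 31; // formerly GGML_TYPE_Q4_0_4_4
        if (ggml_blck_size(t) == 0) {
            // prints the placeholder name from the traits table:
            // "TYPE_Q4_0_4_4 REMOVED, use Q4_0 with runtime repacking"
            fprintf(stderr, "unsupported tensor type %d: %s\n", (int) t, ggml_type_name(t));
            return 1;
        }
        return 0;
    }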
@@ -1432,9 +1432,6 @@ class GGMLQuantizationType(IntEnum):
     F64      = 28
     IQ1_M    = 29
     BF16     = 30
-    Q4_0_4_4 = 31
-    Q4_0_4_8 = 32
-    Q4_0_8_8 = 33
     TQ1_0    = 34
     TQ2_0    = 35
@@ -1478,9 +1475,9 @@ class LlamaFileType(IntEnum):
     MOSTLY_IQ4_XS        = 30  # except 1d tensors
     MOSTLY_IQ1_M         = 31  # except 1d tensors
     MOSTLY_BF16          = 32  # except 1d tensors
-    MOSTLY_Q4_0_4_4      = 33  # except 1d tensors
-    MOSTLY_Q4_0_4_8      = 34  # except 1d tensors
-    MOSTLY_Q4_0_8_8      = 35  # except 1d tensors
+    # MOSTLY_Q4_0_4_4    = 33  # removed from gguf files, use Q4_0 and runtime repack
+    # MOSTLY_Q4_0_4_8    = 34  # removed from gguf files, use Q4_0 and runtime repack
+    # MOSTLY_Q4_0_8_8    = 35  # removed from gguf files, use Q4_0 and runtime repack
     MOSTLY_TQ1_0         = 36  # except 1d tensors
     MOSTLY_TQ2_0         = 37  # except 1d tensors
@@ -1556,9 +1553,6 @@ GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
     GGMLQuantizationType.F64:     (1, 8),
     GGMLQuantizationType.IQ1_M:   (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
     GGMLQuantizationType.BF16:    (1, 2),
-    GGMLQuantizationType.Q4_0_4_4:(32, 2 + 16),
-    GGMLQuantizationType.Q4_0_4_8:(32, 2 + 16),
-    GGMLQuantizationType.Q4_0_8_8:(32, 2 + 16),
     GGMLQuantizationType.TQ1_0:   (256, 2 + 4 * 13),
     GGMLQuantizationType.TQ2_0:   (256, 2 + 64),
 }
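Each (block_size, type_size) entry implies 8 * type_size / block_size bits per weight: Q4_0's (32, 2 + 16) works out to 8 * 18 / 32 = 4.5 bpw. The deleted Q4_0_4_4/4_8/8_8 rows carried the exact same (32, 2 + 16) layout, identical bytes with only a different in-memory interleaving, which is why dropping them loses nothing: a plain Q4_0 file can be repacked into the interleaved form at load time.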
@@ -172,9 +172,9 @@ extern "C" {
     LLAMA_FTYPE_MOSTLY_IQ4_XS       = 30, // except 1d tensors
     LLAMA_FTYPE_MOSTLY_IQ1_M        = 31, // except 1d tensors
     LLAMA_FTYPE_MOSTLY_BF16         = 32, // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_0_4_4     = 33, // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_0_4_8     = 34, // except 1d tensors
-    LLAMA_FTYPE_MOSTLY_Q4_0_8_8     = 35, // except 1d tensors
+    //LLAMA_FTYPE_MOSTLY_Q4_0_4_4   = 33, // removed from gguf files, use Q4_0 and runtime repack
+    //LLAMA_FTYPE_MOSTLY_Q4_0_4_8   = 34, // removed from gguf files, use Q4_0 and runtime repack
+    //LLAMA_FTYPE_MOSTLY_Q4_0_8_8   = 35, // removed from gguf files, use Q4_0 and runtime repack
     LLAMA_FTYPE_MOSTLY_TQ1_0       = 36, // except 1d tensors
     LLAMA_FTYPE_MOSTLY_TQ2_0       = 37, // except 1d tensors
@@ -4578,9 +4578,6 @@ struct llama_model_loader {
                 case GGML_TYPE_IQ4_NL:   ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL;   break;
                 case GGML_TYPE_IQ4_XS:   ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS;   break;
                 case GGML_TYPE_IQ3_S:    ftype = LLAMA_FTYPE_MOSTLY_IQ3_S;    break;
-                case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break;
-                case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break;
-                case GGML_TYPE_Q4_0_8_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_8_8; break;
                 default:
                     {
                         LLAMA_LOG_WARN("%s: unknown type %s\n", __func__, ggml_type_name(type_max));
@@ -5344,9 +5341,6 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_MOSTLY_IQ4_XS:   return "IQ4_XS - 4.25 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ3_S:    return "IQ3_S - 3.4375 bpw";
         case LLAMA_FTYPE_MOSTLY_IQ3_M:    return "IQ3_S mix - 3.66 bpw";
-        case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
-        case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
-        case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: return "Q4_0_8_8";
 
         default: return "unknown, may not work";
     }
@@ -18367,10 +18361,6 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS) {
         new_type = GGML_TYPE_IQ3_S;
     }
-    else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 ||
-             new_type == GGML_TYPE_Q4_0_8_8) {
-        new_type = GGML_TYPE_Q4_0;
-    }
     else if (ftype == LLAMA_FTYPE_MOSTLY_TQ1_0 || ftype == LLAMA_FTYPE_MOSTLY_TQ2_0) {
         new_type = GGML_TYPE_Q4_K;
     }
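This demotion branch is now dead code: the N_M types can no longer be requested as an output type, and old files containing them are rejected by the gguf loader, so new_type can never hold one of them here.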
@@ -18693,9 +18683,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         case LLAMA_FTYPE_MOSTLY_IQ4_XS:   default_type = GGML_TYPE_IQ4_XS;   break;
         case LLAMA_FTYPE_MOSTLY_IQ3_S:    default_type = GGML_TYPE_IQ3_S;    break;
         case LLAMA_FTYPE_MOSTLY_IQ3_M:    default_type = GGML_TYPE_IQ3_S;    break;
-        case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break;
-        case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
-        case LLAMA_FTYPE_MOSTLY_Q4_0_8_8: default_type = GGML_TYPE_Q4_0_8_8; break;
 
         default: throw std::runtime_error(format("invalid output file type %d\n", ftype));
     }
@@ -19034,14 +19021,6 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             f32_data = (float *) f32_conv_buf.data();
         }
 
-        int chunk_size_multiplier = 1;
-        if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8 || new_type == GGML_TYPE_Q4_0_8_8) {
-            if ((new_type == GGML_TYPE_Q4_0_8_8) && (tensor->ne[1] % 8 != 0)) new_type = GGML_TYPE_Q4_0;
-            else if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_Q4_0;
-            if (new_type == GGML_TYPE_Q4_0_8_8) chunk_size_multiplier = 8;
-            else if (new_type == GGML_TYPE_Q4_0_4_4 || new_type == GGML_TYPE_Q4_0_4_8) chunk_size_multiplier = 4;
-        }
-
         LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
         fflush(stdout);
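Likewise, the multiplier and the ne[1] alignment demotions only existed to serve the N_M output types; with those gone the chunk size no longer needs scaling, as the next hunk and the worked example after it show.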
@@ -19054,8 +19033,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         const int64_t nrows = tensor->ne[1];
 
         static const int64_t min_chunk_size = 32 * 512;
-        const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row)) *
-                                   chunk_size_multiplier;
+        const int64_t chunk_size = (n_per_row >= min_chunk_size ? n_per_row : n_per_row * ((min_chunk_size + n_per_row - 1)/n_per_row));
 
         const int64_t nelements_matrix = tensor->ne[0] * tensor->ne[1];
         const int64_t nchunk = (nelements_matrix + chunk_size - 1)/chunk_size;
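The surviving expression groups whole rows until a chunk covers at least min_chunk_size = 32 * 512 = 16384 elements. A small self-contained check of the arithmetic, using hypothetical tensor dimensions:

    #include <cstdint>
    #include <cstdio>

    // sketch of the chunking arithmetic above, with illustrative sizes
    int main() {
        const int64_t min_chunk_size = 32 * 512;   // 16384 elements
        const int64_t n_per_row      = 4096;       // hypothetical row width
        const int64_t nrows          = 4096;       // hypothetical row count

        // rows are grouped until a chunk holds at least min_chunk_size elements
        const int64_t chunk_size = (n_per_row >= min_chunk_size
            ? n_per_row
            : n_per_row * ((min_chunk_size + n_per_row - 1) / n_per_row));

        const int64_t nelements = n_per_row * nrows;
        const int64_t nchunk    = (nelements + chunk_size - 1) / chunk_size;

        // -> chunk_size = 16384 (4 rows per chunk), nchunk = 1024
        printf("chunk_size = %lld, nchunk = %lld\n", (long long) chunk_size, (long long) nchunk);
        return 0;
    }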