diff --git a/.devops/full-cuda.Dockerfile b/.devops/full-cuda.Dockerfile
index 059fd2695..c01006efe 100644
--- a/.devops/full-cuda.Dockerfile
+++ b/.devops/full-cuda.Dockerfile
@@ -31,6 +31,6 @@ ENV LLAMA_CUDA=1
# Enable cURL
ENV LLAMA_CURL=1
-RUN make
+RUN make -j$(nproc)
ENTRYPOINT ["/app/.devops/tools.sh"]
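(For context: a minimal sketch of building this image locally to exercise the parallel build; the `local/llama.cpp:full-cuda` tag is an arbitrary placeholder, not part of this diff.)

```bash
# Build the full CUDA image; the RUN make -j$(nproc) step above compiles with
# as many parallel jobs as the build container has CPU cores.
docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
```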
diff --git a/.devops/full-rocm.Dockerfile b/.devops/full-rocm.Dockerfile
index 6ecf3bcc7..0314d469b 100644
--- a/.devops/full-rocm.Dockerfile
+++ b/.devops/full-rocm.Dockerfile
@@ -45,6 +45,6 @@ ENV LLAMA_CURL=1
RUN apt-get update && \
apt-get install -y libcurl4-openssl-dev
-RUN make
+RUN make -j$(nproc)
ENTRYPOINT ["/app/.devops/tools.sh"]
diff --git a/.devops/full.Dockerfile b/.devops/full.Dockerfile
index 432fb5dad..6d5943a2f 100644
--- a/.devops/full.Dockerfile
+++ b/.devops/full.Dockerfile
@@ -18,7 +18,7 @@ COPY . .
ENV LLAMA_CURL=1
-RUN make
+RUN make -j$(nproc)
ENV LC_ALL=C.utf8
diff --git a/.devops/main-cuda.Dockerfile b/.devops/main-cuda.Dockerfile
index b937a4829..23f428944 100644
--- a/.devops/main-cuda.Dockerfile
+++ b/.devops/main-cuda.Dockerfile
@@ -23,7 +23,7 @@ ENV CUDA_DOCKER_ARCH=${CUDA_DOCKER_ARCH}
# Enable CUDA
ENV LLAMA_CUDA=1
-RUN make
+RUN make -j$(nproc)
FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
diff --git a/.devops/main-intel.Dockerfile b/.devops/main-intel.Dockerfile
index 274b91b71..7516c8313 100644
--- a/.devops/main-intel.Dockerfile
+++ b/.devops/main-intel.Dockerfile
@@ -2,6 +2,14 @@ ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04
FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+ echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+ chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+ rm /etc/apt/sources.list.d/intel-graphics.list && \
+ wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+ echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+ chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
ARG LLAMA_SYCL_F16=OFF
RUN apt-get update && \
apt-get install -y git
diff --git a/.devops/main-rocm.Dockerfile b/.devops/main-rocm.Dockerfile
index 0a706dc73..37576d68e 100644
--- a/.devops/main-rocm.Dockerfile
+++ b/.devops/main-rocm.Dockerfile
@@ -40,6 +40,6 @@ ENV LLAMA_HIPBLAS=1
ENV CC=/opt/rocm/llvm/bin/clang
ENV CXX=/opt/rocm/llvm/bin/clang++
-RUN make
+RUN make -j$(nproc)
ENTRYPOINT [ "/app/main" ]
diff --git a/.devops/main.Dockerfile b/.devops/main.Dockerfile
index 3ab1decd6..763d75fce 100644
--- a/.devops/main.Dockerfile
+++ b/.devops/main.Dockerfile
@@ -9,7 +9,7 @@ WORKDIR /app
COPY . .
-RUN make
+RUN make -j$(nproc)
FROM ubuntu:$UBUNTU_VERSION as runtime
diff --git a/.devops/server-cuda.Dockerfile b/.devops/server-cuda.Dockerfile
index 59a52ba21..7f5228185 100644
--- a/.devops/server-cuda.Dockerfile
+++ b/.devops/server-cuda.Dockerfile
@@ -25,7 +25,7 @@ ENV LLAMA_CUDA=1
# Enable cURL
ENV LLAMA_CURL=1
-RUN make
+RUN make -j$(nproc)
FROM ${BASE_CUDA_RUN_CONTAINER} as runtime
diff --git a/.devops/server-intel.Dockerfile b/.devops/server-intel.Dockerfile
index a8e451fa9..13d00b737 100644
--- a/.devops/server-intel.Dockerfile
+++ b/.devops/server-intel.Dockerfile
@@ -2,6 +2,14 @@ ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04
FROM intel/oneapi-basekit:$ONEAPI_VERSION as build
+RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+ echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+ chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+ rm /etc/apt/sources.list.d/intel-graphics.list && \
+ wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+ echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+ chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
ARG LLAMA_SYCL_F16=OFF
RUN apt-get update && \
apt-get install -y git libcurl4-openssl-dev
@@ -19,6 +27,14 @@ RUN if [ "${LLAMA_SYCL_F16}" = "ON" ]; then \
FROM intel/oneapi-basekit:$ONEAPI_VERSION as runtime
+RUN wget -O- https://apt.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS.PUB | gpg --dearmor | tee /usr/share/keyrings/intel-oneapi-archive-keyring.gpg > /dev/null && \
+ echo "deb [signed-by=/usr/share/keyrings/intel-oneapi-archive-keyring.gpg] https://apt.repos.intel.com/oneapi all main " | tee /etc/apt/sources.list.d/oneAPI.list && \
+ chmod 644 /usr/share/keyrings/intel-oneapi-archive-keyring.gpg && \
+ rm /etc/apt/sources.list.d/intel-graphics.list && \
+ wget -O- https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor | tee /usr/share/keyrings/intel-graphics.gpg > /dev/null && \
+ echo "deb [arch=amd64,i386 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy arc" | tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \
+ chmod 644 /usr/share/keyrings/intel-graphics.gpg
+
RUN apt-get update && \
apt-get install -y libcurl4-openssl-dev
diff --git a/.devops/server-rocm.Dockerfile b/.devops/server-rocm.Dockerfile
index c02a31dd8..a6b76dee8 100644
--- a/.devops/server-rocm.Dockerfile
+++ b/.devops/server-rocm.Dockerfile
@@ -45,6 +45,6 @@ ENV LLAMA_CURL=1
RUN apt-get update && \
apt-get install -y libcurl4-openssl-dev
-RUN make
+RUN make -j$(nproc)
ENTRYPOINT [ "/app/server" ]
diff --git a/.devops/server.Dockerfile b/.devops/server.Dockerfile
index be964e0e8..0d09d3627 100644
--- a/.devops/server.Dockerfile
+++ b/.devops/server.Dockerfile
@@ -11,7 +11,7 @@ COPY . .
ENV LLAMA_CURL=1
-RUN make
+RUN make -j$(nproc)
FROM ubuntu:$UBUNTU_VERSION as runtime
diff --git a/.devops/tools.sh b/.devops/tools.sh
index 3a7d274e4..97424c3aa 100755
--- a/.devops/tools.sh
+++ b/.devops/tools.sh
@@ -8,7 +8,7 @@ arg1="$1"
shift
if [[ "$arg1" == '--convert' || "$arg1" == '-c' ]]; then
- python3 ./convert.py "$@"
+ python3 ./convert-hf-to-gguf.py "$@"
elif [[ "$arg1" == '--quantize' || "$arg1" == '-q' ]]; then
./quantize "$@"
elif [[ "$arg1" == '--run' || "$arg1" == '-r' ]]; then
diff --git a/.github/ISSUE_TEMPLATE/06-question.yml b/.github/ISSUE_TEMPLATE/06-question.yml
deleted file mode 100644
index 9d3ff4972..000000000
--- a/.github/ISSUE_TEMPLATE/06-question.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Question
-description: Used to ask questions about llama.cpp
-title: "Question: "
-labels: ["question"]
-body:
- - type: markdown
- attributes:
- value: |
- [Please search your question first in Discussion if you got a common general question.](https://github.com/ggerganov/llama.cpp/discussions/categories/q-a)
-
- - type: checkboxes
- id: prerequisites
- attributes:
- label: Prerequisites
- description: Please confirm the following before submitting your question.
- options:
- - label: I searched using keywords relevant to my issue to make sure that I am creating a new issue that is not already open (or closed).
- required: true
- - label: I reviewed the [Discussions](https://github.com/ggerganov/llama.cpp/discussions), and have a new useful question to share that cannot be answered within Discussions.
- required: true
-
- - type: textarea
- id: background-description
- attributes:
- label: Background Description
- description: Please provide a detailed written description of what you were trying to do, and what you expected `llama.cpp` to do as an question.
- placeholder: Detailed description of your question
- validations:
- required: true
-
- - type: textarea
- id: possible-answer
- attributes:
- label: Possible Answer
- description: If you have some idea of possible answers you want to confirm, that would also be appreciated.
- placeholder: Your idea of possible answers
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/06-research.yml b/.github/ISSUE_TEMPLATE/06-research.yml
new file mode 100644
index 000000000..3ae4e9f8c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/06-research.yml
@@ -0,0 +1,52 @@
+name: Research
+description: Track new technical research area
+title: "Research: "
+labels: ["research 🔬"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Don't forget to check for any [duplicate research issue tickets](https://github.com/ggerganov/llama.cpp/issues?q=is%3Aopen+is%3Aissue+label%3A%22research+%F0%9F%94%AC%22)
+
+ - type: checkboxes
+ id: research-stage
+ attributes:
+ label: Research Stage
+ description: Track general state of this research ticket
+ options:
+ - label: Background Research (Let's try to avoid reinventing the wheel)
+        - label: Hypothesis Formed (How do you think this will work and what will its effect be?)
+ - label: Strategy / Implementation Forming
+ - label: Analysis of results
+ - label: Debrief / Documentation (So people in the future can learn from us)
+
+ - type: textarea
+ id: background
+ attributes:
+ label: Previous existing literature and research
+      description: What's the current state of the art and what's the motivation for this research?
+
+ - type: textarea
+ id: hypothesis
+ attributes:
+ label: Hypothesis
+      description: How do you think this will work and what will its effect be?
+
+ - type: textarea
+ id: implementation
+ attributes:
+ label: Implementation
+ description: Got an approach? e.g. a PR ready to go?
+
+ - type: textarea
+ id: analysis
+ attributes:
+ label: Analysis
+ description: How does the proposed implementation behave?
+
+ - type: textarea
+ id: logs
+ attributes:
+ label: Relevant log output
+ description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
+ render: shell
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..c88134dbb
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,13 @@
+blank_issues_enabled: true
+contact_links:
+ - name: Got an idea?
+ url: https://github.com/ggerganov/llama.cpp/discussions/categories/ideas
+ about: Pop it there. It may then become an enhancement ticket.
+ - name: Got a question?
+ url: https://github.com/ggerganov/llama.cpp/discussions/categories/q-a
+ about: Ask a question there!
+ - name: Want to contribute?
+ url: https://github.com/ggerganov/llama.cpp/wiki/contribute
+ about: Head to the contribution guide page of the wiki for areas you can help with
+
+
diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index c2838cbd9..9b03d19bc 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -42,9 +42,8 @@ jobs:
- { tag: "light-rocm", dockerfile: ".devops/main-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
- { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
- { tag: "server-rocm", dockerfile: ".devops/server-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
- # TODO: Disabled due to build issues https://github.com/ggerganov/llama.cpp/issues/7507
- #- { tag: "light-intel", dockerfile: ".devops/main-intel.Dockerfile", platforms: "linux/amd64" }
- #- { tag: "server-intel", dockerfile: ".devops/server-intel.Dockerfile", platforms: "linux/amd64" }
+ - { tag: "light-intel", dockerfile: ".devops/main-intel.Dockerfile", platforms: "linux/amd64" }
+ - { tag: "server-intel", dockerfile: ".devops/server-intel.Dockerfile", platforms: "linux/amd64" }
steps:
- name: Check out the repo
uses: actions/checkout@v4
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c5add8239..52b392a13 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -106,6 +106,7 @@ set(LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
"llama: max. batch size for using peer access")
option(LLAMA_CUDA_NO_PEER_COPY "llama: do not use peer to peer copies" OFF)
option(LLAMA_CUDA_NO_VMM "llama: do not try to use CUDA VMM" OFF)
+option(LLAMA_CUDA_FA_ALL_QUANTS "llama: compile all quants for FlashAttention" OFF)
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
@@ -402,6 +403,8 @@ if (LLAMA_CUDA)
file(GLOB GGML_SOURCES_CUDA "ggml-cuda/*.cu")
list(APPEND GGML_SOURCES_CUDA "ggml-cuda.cu")
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-wmma*.cu")
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
add_compile_definitions(GGML_USE_CUDA)
add_compile_definitions(GGML_CUDA_USE_GRAPHS)
@@ -427,6 +430,18 @@ if (LLAMA_CUDA)
if (LLAMA_CUDA_NO_PEER_COPY)
add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
endif()
+ if (LLAMA_CUDA_FA_ALL_QUANTS)
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*.cu")
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
+ add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS)
+ else()
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu")
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu")
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*f16-f16.cu")
+ list(APPEND GGML_SOURCES_CUDA ${SRCS})
+ endif()
if (LLAMA_STATIC)
if (WIN32)
@@ -571,6 +586,8 @@ if (LLAMA_HIPBLAS)
file(GLOB GGML_SOURCES_ROCM "ggml-cuda/*.cu")
list(APPEND GGML_SOURCES_ROCM "ggml-cuda.cu")
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-wmma*.cu")
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
add_compile_definitions(GGML_USE_HIPBLAS GGML_USE_CUDA)
@@ -590,6 +607,19 @@ if (LLAMA_HIPBLAS)
add_compile_definitions(GGML_CUDA_NO_PEER_COPY)
endif()
+ if (LLAMA_CUDA_FA_ALL_QUANTS)
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*.cu")
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
+ add_compile_definitions(GGML_CUDA_FA_ALL_QUANTS)
+ else()
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu")
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu")
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
+ file(GLOB SRCS "ggml-cuda/template-instances/fattn-vec*f16-f16.cu")
+ list(APPEND GGML_SOURCES_ROCM ${SRCS})
+ endif()
+
add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER})
@@ -628,6 +658,10 @@ if (LLAMA_SYCL)
add_compile_definitions(GGML_SYCL_F16)
endif()
+ if (LLAMA_CUDA_FORCE_MMQ)
+ add_compile_definitions(GGML_SYCL_FORCE_MMQ)
+ endif()
+
add_compile_options(-I./) #include DPCT
add_compile_options(-I/${SYCL_INCLUDE_DIR})
@@ -1310,7 +1344,7 @@ set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}
install(TARGETS llama LIBRARY PUBLIC_HEADER)
install(
- FILES convert.py
+ FILES convert-hf-to-gguf.py
PERMISSIONS
OWNER_READ
OWNER_WRITE
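(A minimal CMake configuration exercising the new `LLAMA_CUDA_FA_ALL_QUANTS` option, assuming a CUDA toolchain is installed.)

```bash
# Compile every FlashAttention KV-cache quantization combination; drop the
# second -D flag to keep the smaller default set of template instances.
cmake -B build -DLLAMA_CUDA=ON -DLLAMA_CUDA_FA_ALL_QUANTS=ON
cmake --build build --config Release -j 8
```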
diff --git a/Makefile b/Makefile
index 5caf31cdf..dfb3bb2cd 100644
--- a/Makefile
+++ b/Makefile
@@ -421,6 +421,15 @@ ifdef LLAMA_CUBLAS
LLAMA_CUDA := 1
endif
+OBJS_CUDA_TEMP_INST = $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-wmma*.cu))
+ifdef LLAMA_CUDA_FA_ALL_QUANTS
+ OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*.cu))
+else
+ OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*q4_0-q4_0.cu))
+ OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*q8_0-q8_0.cu))
+ OBJS_CUDA_TEMP_INST += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/template-instances/fattn-vec*f16-f16.cu))
+endif # LLAMA_CUDA_FA_ALL_QUANTS
+
ifdef LLAMA_CUDA
ifneq ('', '$(wildcard /opt/cuda)')
CUDA_PATH ?= /opt/cuda
@@ -431,6 +440,7 @@ ifdef LLAMA_CUDA
MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
OBJS += ggml-cuda.o
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
+ OBJS += $(OBJS_CUDA_TEMP_INST)
MK_NVCCFLAGS += -use_fast_math
ifdef LLAMA_FATAL_WARNINGS
MK_NVCCFLAGS += -Werror all-warnings
@@ -493,7 +503,10 @@ ifdef LLAMA_CUDA_NO_PEER_COPY
endif # LLAMA_CUDA_NO_PEER_COPY
ifdef LLAMA_CUDA_CCBIN
MK_NVCCFLAGS += -ccbin $(LLAMA_CUDA_CCBIN)
-endif
+endif # LLAMA_CUDA_CCBIN
+ifdef LLAMA_CUDA_FA_ALL_QUANTS
+ MK_NVCCFLAGS += -DGGML_CUDA_FA_ALL_QUANTS
+endif # LLAMA_CUDA_FA_ALL_QUANTS
ifdef JETSON_EOL_MODULE_DETECT
define NVCC_COMPILE
@@ -505,7 +518,7 @@ define NVCC_COMPILE
endef # NVCC_COMPILE
endif # JETSON_EOL_MODULE_DETECT
-ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
+ggml-cuda/%.o: ggml-cuda/%.cu ggml.h ggml-common.h ggml-cuda/common.cuh
$(NVCC_COMPILE)
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
@@ -571,6 +584,7 @@ ifdef LLAMA_HIP_UMA
MK_CPPFLAGS += -DGGML_HIP_UMA
endif # LLAMA_HIP_UMA
MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib
+ MK_LDFLAGS += -L$(ROCM_PATH)/lib64 -Wl,-rpath=$(ROCM_PATH)/lib64
MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas
HIPFLAGS += $(addprefix --offload-arch=,$(AMDGPU_TARGETS))
HIPFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
@@ -584,11 +598,12 @@ ifdef LLAMA_CUDA_NO_PEER_COPY
endif # LLAMA_CUDA_NO_PEER_COPY
OBJS += ggml-cuda.o
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
+ OBJS += $(OBJS_CUDA_TEMP_INST)
ggml-cuda.o: ggml-cuda.cu ggml-cuda.h ggml.h ggml-backend.h ggml-backend-impl.h ggml-common.h $(wildcard ggml-cuda/*.cuh)
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
-ggml-cuda/%.o: ggml-cuda/%.cu ggml-cuda/%.cuh ggml.h ggml-common.h ggml-cuda/common.cuh
+ggml-cuda/%.o: ggml-cuda/%.cu ggml.h ggml-common.h ggml-cuda/common.cuh
$(HIPCC) $(CXXFLAGS) $(HIPFLAGS) -x hip -c -o $@ $<
endif # LLAMA_HIPBLAS
@@ -748,6 +763,7 @@ libllama.a: llama.o ggml.o $(OBJS) $(COMMON_DEPS)
clean:
rm -vrf *.o tests/*.o *.so *.a *.dll benchmark-matmult lookup-create lookup-merge lookup-stats common/build-info.cpp *.dot $(COV_TARGETS) $(BUILD_TARGETS) $(TEST_TARGETS)
rm -vrf ggml-cuda/*.o
+ rm -vrf ggml-cuda/template-instances/*.o
find examples pocs -type f -name "*.o" -delete
#
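(The equivalent Makefile invocation, as a sketch.)

```bash
# Build with CUDA and all FlashAttention KV-cache quant combinations enabled.
make LLAMA_CUDA=1 LLAMA_CUDA_FA_ALL_QUANTS=1 -j$(nproc)
```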
diff --git a/README-sycl.md b/README-sycl.md
index cfa248a95..37f0306dc 100644
--- a/README-sycl.md
+++ b/README-sycl.md
@@ -54,10 +54,10 @@ It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS,
## OS
-| OS | Status | Verified |
-|---------|---------|------------------------------------|
-| Linux | Support | Ubuntu 22.04, Fedora Silverblue 39 |
-| Windows | Support | Windows 11 |
+| OS | Status | Verified |
+|---------|---------|------------------------------------------------|
+| Linux | Support | Ubuntu 22.04, Fedora Silverblue 39, Arch Linux |
+| Windows | Support | Windows 11 |
## Hardware
@@ -70,7 +70,7 @@ It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS,
|-------------------------------|---------|---------------------------------------|
| Intel Data Center Max Series | Support | Max 1550, 1100 |
| Intel Data Center Flex Series | Support | Flex 170 |
-| Intel Arc Series | Support | Arc 770, 730M |
+| Intel Arc Series | Support | Arc 770, 730M, Arc A750 |
| Intel built-in Arc GPU | Support | built-in Arc GPU in Meteor Lake |
| Intel iGPU | Support | iGPU in i5-1250P, i7-1260P, i7-1165G7 |
diff --git a/README.md b/README.md
index 15519c97f..4791f84af 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,9 @@
![llama](https://user-images.githubusercontent.com/1991296/230134379-7181e485-c521-4d23-a0d6-f7b3b61ba524.png)
-[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg?branch=master&event=schedule)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)
+[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+[![Server](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml/badge.svg?branch=master&event=schedule)](https://github.com/ggerganov/llama.cpp/actions/workflows/server.yml)
+[![Conan Center](https://shields.io/conan/v/llama-cpp)](https://conan.io/center/llama-cpp)
[Roadmap](https://github.com/users/ggerganov/projects/7) / [Project status](https://github.com/ggerganov/llama.cpp/discussions/3471) / [Manifesto](https://github.com/ggerganov/llama.cpp/discussions/205) / [ggml](https://github.com/ggerganov/ggml)
@@ -20,7 +22,8 @@ Inference of Meta's [LLaMA](https://arxiv.org/abs/2302.13971) model (and others)
### Hot topics
-- **Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021**
+- **`convert.py` has been deprecated and moved to `examples/convert-legacy-llama.py`; please use `convert-hf-to-gguf.py`** https://github.com/ggerganov/llama.cpp/pull/7430
+- Initial Flash-Attention support: https://github.com/ggerganov/llama.cpp/pull/5021
- BPE pre-tokenization support has been added: https://github.com/ggerganov/llama.cpp/pull/6920
- MoE memory layout has been updated - reconvert models for `mmap` support and regenerate `imatrix` https://github.com/ggerganov/llama.cpp/pull/6387
- Model sharding instructions using `gguf-split` https://github.com/ggerganov/llama.cpp/discussions/6404
@@ -200,6 +203,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [KodiBot](https://github.com/firatkiral/kodibot) (GPL)
- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT)
- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (MIT)
+- [AIKit](https://github.com/sozercan/aikit) (MIT)
*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)*
@@ -315,8 +319,6 @@ In order to build llama.cpp you have four different options.
make
```
- **Note**: for `Debug` builds, run `make LLAMA_DEBUG=1`
-
- On Windows:
1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases).
@@ -328,23 +330,32 @@ In order to build llama.cpp you have four different options.
make
```
+ - Notes:
+ - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel.
+ - For faster repeated compilation, install [ccache](https://ccache.dev/).
+ - For debug builds, run `make LLAMA_DEBUG=1`
+
- Using `CMake`:
- ```bash
- cmake -B build
- cmake --build build --config Release
- ```
+ ```bash
+ cmake -B build
+ cmake --build build --config Release
+ ```
- **Note**: for `Debug` builds, there are two cases:
+ **Notes**:
- - Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):
+ - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel.
+ - For faster repeated compilation, install [ccache](https://ccache.dev/).
+ - For debug builds, there are two cases:
+
+ 1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag):
```bash
cmake -B build -DCMAKE_BUILD_TYPE=Debug
cmake --build build
```
- - Multi-config generators (`-G` param set to Visual Studio, XCode...):
+ 2. Multi-config generators (`-G` param set to Visual Studio, XCode...):
```bash
cmake -B build -G "Xcode"
@@ -379,6 +390,14 @@ In order to build llama.cpp you have four different options.
CLBLAST support for use OpenCL GPU acceleration in FreeBSD. Please read
the instructions for use and activate this options in this document below.
+### Homebrew
+
+On Mac and Linux, the homebrew package manager can be used via
+```
+brew install llama.cpp
+```
+The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
+
### Metal Build
On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
@@ -477,10 +496,12 @@ Building the program with BLAS support may lead to some performance improvements
|--------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| LLAMA_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. |
| LLAMA_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. |
- | LLAMA_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
+ | LLAMA_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. |
+ | LLAMA_CUDA_FORCE_MMQ | Boolean | false | Force the use of dequantization + matrix multiplication kernels instead of leveraging Math libraries. | |
| LLAMA_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. |
| LLAMA_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. |
| LLAMA_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. |
+ | LLAMA_CUDA_FA_ALL_QUANTS | Boolean | false | Compile support for all KV cache quantization type (combinations) for the FlashAttention CUDA kernels. More fine-grained control over KV cache size but compilation takes much longer. |
- #### hipBLAS
@@ -696,7 +717,8 @@ Building the program with BLAS support may lead to some performance improvements
To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face.
-Note: `convert.py` does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
+Note: `convert.py` has been moved to `examples/convert-legacy-llama.py` and shouldn't be used for anything other than `Llama/Llama2/Mistral` models and their derivatives.
+It does not support LLaMA 3; you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face.
```bash
# obtain the official LLaMA model weights and place them in ./models
@@ -713,10 +735,10 @@ ls ./models
python3 -m pip install -r requirements.txt
# convert the model to ggml FP16 format
-python3 convert.py models/mymodel/
+python3 convert-hf-to-gguf.py models/mymodel/
# [Optional] for models using BPE tokenizers
-python convert.py models/mymodel/ --vocab-type bpe
+python convert-hf-to-gguf.py models/mymodel/ --vocab-type bpe
# quantize the model to 4-bits (using Q4_K_M method)
./quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
diff --git a/ci/run.sh b/ci/run.sh
index 940299025..3fc5f48b2 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -287,7 +287,7 @@ function gg_run_open_llama_7b_v2 {
(time cmake -DCMAKE_BUILD_TYPE=Release ${CMAKE_EXTRA} -DLLAMA_CUDA=1 .. ) 2>&1 | tee -a $OUT/${ci}-cmake.log
(time make -j ) 2>&1 | tee -a $OUT/${ci}-make.log
- python3 ../convert.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
+ python3 ../examples/convert-legacy-llama.py ${path_models} --outfile ${path_models}/ggml-model-f16.gguf
model_f16="${path_models}/ggml-model-f16.gguf"
model_q8_0="${path_models}/ggml-model-q8_0.gguf"
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 3c9958337..95c3ac30c 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -25,8 +25,6 @@ if 'NO_LOCAL_GGUF' not in os.environ:
sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
import gguf
-from convert import LlamaHfVocab
-
logger = logging.getLogger("hf-to-gguf")
@@ -634,7 +632,7 @@ class Model:
special_vocab.add_to_gguf(self.gguf_writer)
def _set_vocab_llama_hf(self):
- vocab = LlamaHfVocab(self.dir_model)
+ vocab = gguf.LlamaHfVocab(self.dir_model)
tokens = []
scores = []
toktypes = []
@@ -2971,7 +2969,12 @@ def main() -> None:
hparams = Model.load_hparams(dir_model)
with torch.inference_mode():
- model_class = Model.from_model_architecture(hparams["architectures"][0])
+ try:
+ model_class = Model.from_model_architecture(hparams["architectures"][0])
+ except NotImplementedError:
+ logger.error(f"Model {hparams['architectures'][0]} is not supported")
+ sys.exit(1)
+
model_instance = model_class(dir_model, ftype_map[args.outtype], fname_out, args.bigendian, args.use_temp_file, args.no_lazy)
logger.info("Set model parameters")
diff --git a/docs/HOWTO-add-model.md b/docs/HOWTO-add-model.md
index 48769cdf6..138124248 100644
--- a/docs/HOWTO-add-model.md
+++ b/docs/HOWTO-add-model.md
@@ -17,7 +17,7 @@ Also, it is important to check that the examples and main ggml backends (CUDA, M
### 1. Convert the model to GGUF
This step is done in python with a `convert` script using the [gguf](https://pypi.org/project/gguf/) library.
-Depending on the model architecture, you can use either [convert.py](../convert.py) or [convert-hf-to-gguf.py](../convert-hf-to-gguf.py).
+Depending on the model architecture, you can use either [convert-hf-to-gguf.py](../convert-hf-to-gguf.py) or [examples/convert-legacy-llama.py](../examples/convert-legacy-llama.py) (for `llama/llama2` models in `.pth` format).
The convert script reads the model configuration, tokenizer, tensor names+data and converts them to GGUF metadata and tensors.
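(A short sketch of the two conversion paths referenced above; the model paths are placeholders.)

```bash
# Most architectures: convert a Hugging Face model directory directly.
python3 convert-hf-to-gguf.py path/to/hf-model --outtype f16

# Legacy llama/llama2 checkpoints in .pth format: use the relocated script.
python3 examples/convert-legacy-llama.py path/to/llama-weights --outfile model-f16.gguf
```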
diff --git a/convert.py b/examples/convert-legacy-llama.py
similarity index 82%
rename from convert.py
rename to examples/convert-legacy-llama.py
index da1247957..fd8401015 100755
--- a/convert.py
+++ b/examples/convert-legacy-llama.py
@@ -24,14 +24,16 @@ from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
-from typing import TYPE_CHECKING, Any, Callable, ClassVar, IO, Iterable, Literal, Protocol, TypeVar, runtime_checkable, Optional
+from typing import TYPE_CHECKING, Any, Callable, IO, Iterable, Literal, TypeVar, Optional
import numpy as np
-from sentencepiece import SentencePieceProcessor
if 'NO_LOCAL_GGUF' not in os.environ:
- sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
+ # use .parent.parent since we are in "examples" directory
+ sys.path.insert(1, str(Path(__file__).parent.parent / 'gguf-py'))
+
import gguf
+from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, LlamaHfVocab
if TYPE_CHECKING:
from typing_extensions import Self, TypeAlias
@@ -380,306 +382,6 @@ class Metadata:
return metadata
-#
-# vocab
-#
-
-
-@runtime_checkable
-class BaseVocab(Protocol):
- tokenizer_model: ClassVar[str]
- name: ClassVar[str]
-
-
-class NoVocab(BaseVocab):
- tokenizer_model = "no_vocab"
- name = "no_vocab"
-
- def __repr__(self) -> str:
- return ""
-
-
-@runtime_checkable
-class Vocab(BaseVocab, Protocol):
- vocab_size: int
- added_tokens_dict: dict[str, int]
- added_tokens_list: list[str]
- fname_tokenizer: Path
-
- def __init__(self, base_path: Path): ...
- def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...
-
-
-class BpeVocab(Vocab):
- tokenizer_model = "gpt2"
- name = "bpe"
-
- def __init__(self, base_path: Path):
- added_tokens: dict[str, int] = {}
-
- if (fname_tokenizer := base_path / 'vocab.json').exists():
- # "slow" tokenizer
- with open(fname_tokenizer, encoding="utf-8") as f:
- self.vocab = json.load(f)
-
- try:
- # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
- with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
- added_tokens = json.load(f)
- except FileNotFoundError:
- pass
- else:
- # "fast" tokenizer
- fname_tokenizer = base_path / FAST_TOKENIZER_FILE
-
- # if this fails, FileNotFoundError propagates to caller
- with open(fname_tokenizer, encoding="utf-8") as f:
- tokenizer_json = json.load(f)
-
- tokenizer_model: dict[str, Any] = tokenizer_json['model']
- if (
- tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
- or tokenizer_json['decoder']['type'] != 'ByteLevel'
- ):
- raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')
-
- self.vocab = tokenizer_model["vocab"]
-
- if (added := tokenizer_json.get('added_tokens')) is not None:
- # Added tokens here can be duplicates of the main vocabulary.
- added_tokens = {item['content']: item['id']
- for item in added
- if item['content'] not in self.vocab}
-
- vocab_size = len(self.vocab)
- expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
- actual_ids = sorted(added_tokens.values())
- if expected_ids != actual_ids:
- expected_end_id = vocab_size + len(actual_ids) - 1
- raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
- f"{vocab_size} - {expected_end_id}; got {actual_ids}")
-
- items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
- self.added_tokens_dict = added_tokens
- self.added_tokens_list = [text for (text, idx) in items]
- self.vocab_size_base = vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
- self.fname_tokenizer = fname_tokenizer
-
- def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}
-
- for i, _ in enumerate(self.vocab):
- yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL
-
- def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- for text in self.added_tokens_list:
- score = -1000.0
- yield text.encode("utf-8"), score, gguf.TokenType.CONTROL
-
- def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- yield from self.bpe_tokens()
- yield from self.added_tokens()
-
- def __repr__(self) -> str:
- return f""
-
-
-class SentencePieceVocab(Vocab):
- tokenizer_model = "llama"
- name = "spm"
-
- def __init__(self, base_path: Path):
- added_tokens: dict[str, int] = {}
- if (fname_tokenizer := base_path / 'tokenizer.model').exists():
- # normal location
- try:
- with open(base_path / ADDED_TOKENS_FILE, encoding="utf-8") as f:
- added_tokens = json.load(f)
- except FileNotFoundError:
- pass
- elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
- # not found in alternate location either
- raise FileNotFoundError('Cannot find tokenizer.model')
-
- self.sentencepiece_tokenizer = SentencePieceProcessor()
- self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
- vocab_size = self.sentencepiece_tokenizer.vocab_size()
-
- new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
- expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
- actual_new_ids = sorted(new_tokens.keys())
-
- if expected_new_ids != actual_new_ids:
- raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")
-
- # Token pieces that were added to the base vocabulary.
- self.added_tokens_dict = added_tokens
- self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
- self.vocab_size_base = vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
- self.fname_tokenizer = fname_tokenizer
-
- def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- tokenizer = self.sentencepiece_tokenizer
- for i in range(tokenizer.vocab_size()):
- piece = tokenizer.IdToPiece(i)
- text = piece.encode("utf-8")
- score: float = tokenizer.GetScore(i)
-
- toktype = gguf.TokenType.NORMAL
- if tokenizer.IsUnknown(i):
- toktype = gguf.TokenType.UNKNOWN
- if tokenizer.IsControl(i):
- toktype = gguf.TokenType.CONTROL
-
- # NOTE: I think added_tokens are user defined.
- # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
- # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
-
- if tokenizer.IsUnused(i):
- toktype = gguf.TokenType.UNUSED
- if tokenizer.IsByte(i):
- toktype = gguf.TokenType.BYTE
-
- yield text, score, toktype
-
- def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- for text in self.added_tokens_list:
- score = -1000.0
- yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
-
- def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- yield from self.sentencepiece_tokens()
- yield from self.added_tokens()
-
- def __repr__(self) -> str:
- return f""
-
-
-class LlamaHfVocab(Vocab):
- tokenizer_model = "llama"
- name = "hfft"
-
- def __init__(self, base_path: Path):
- fname_tokenizer = base_path / FAST_TOKENIZER_FILE
- # if this fails, FileNotFoundError propagates to caller
- with open(fname_tokenizer, encoding='utf-8') as f:
- tokenizer_json = json.load(f)
-
- # pre-check so we know if we need transformers
- tokenizer_model: dict[str, Any] = tokenizer_json['model']
- is_llama3 = (
- tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
- and not tokenizer_model.get('byte_fallback', True)
- )
- if is_llama3:
- raise TypeError('Llama 3 must be converted with BpeVocab')
-
- if not is_llama3 and (
- tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
- or tokenizer_json['decoder']['type'] != 'Sequence'
- ):
- raise FileNotFoundError('Cannot find Llama BPE tokenizer')
-
- try:
- from transformers import AutoTokenizer
- except ImportError as e:
- raise ImportError(
- "To use LlamaHfVocab, please install the `transformers` package. "
- "You can install it with `pip install transformers`."
- ) from e
-
- # Allow the tokenizer to default to slow or fast versions.
- # Explicitly set tokenizer to use local paths.
- self.tokenizer = AutoTokenizer.from_pretrained(
- base_path,
- cache_dir=base_path,
- local_files_only=True,
- )
- assert self.tokenizer.is_fast # assume tokenizer.json is used
-
- # Initialize lists and dictionaries for added tokens
- self.added_tokens_list = []
- self.added_tokens_dict = dict()
- self.added_tokens_ids = set()
-
- # Process added tokens
- for tok, tokidx in sorted(
- self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
- ):
- # Only consider added tokens that are not in the base vocabulary
- if tokidx >= self.tokenizer.vocab_size:
- self.added_tokens_list.append(tok)
- self.added_tokens_dict[tok] = tokidx
- self.added_tokens_ids.add(tokidx)
-
- # Store special tokens and their IDs
- self.specials = {
- tok: self.tokenizer.get_vocab()[tok]
- for tok in self.tokenizer.all_special_tokens
- }
- self.special_ids = set(self.tokenizer.all_special_ids)
-
- # Set vocabulary sizes
- self.vocab_size_base = self.tokenizer.vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
-
- self.fname_tokenizer = fname_tokenizer
-
- def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- reverse_vocab = {
- id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
- }
-
- for token_id in range(self.vocab_size_base):
- # Skip processing added tokens here
- if token_id in self.added_tokens_ids:
- continue
-
- # Convert token text to bytes
- token_text = reverse_vocab[token_id].encode("utf-8")
-
- # Yield token text, score, and type
- yield token_text, self.get_token_score(token_id), self.get_token_type(
- token_id, token_text, self.special_ids # Reuse already stored special IDs
- )
-
- def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
- # Special case for byte tokens
- if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
- return gguf.TokenType.BYTE
-
- # Determine token type based on whether it's a special token
- return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
-
- def get_token_score(self, token_id: int) -> float:
- # Placeholder for actual logic to determine the token's score
- # This needs to be implemented based on specific requirements
- return -1000.0 # Default score
-
- def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- for text in self.added_tokens_list:
- if text in self.specials:
- toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
- score = self.get_token_score(self.specials[text])
- else:
- toktype = gguf.TokenType.USER_DEFINED
- score = -1000.0
-
- yield text.encode("utf-8"), score, toktype
-
- def has_newline_token(self):
- return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab
-
- def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
- yield from self.hf_tokens()
- yield from self.added_tokens()
-
- def __repr__(self) -> str:
- return f""
-
-
#
# data loading
# TODO: reuse (probably move to gguf.py?)
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 2afdb3abd..c00890447 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -178,6 +178,7 @@ struct cmd_params {
    std::vector<ggml_type> type_v;
    std::vector<int> n_threads;
    std::vector<int> n_gpu_layers;
+    std::vector<std::string> rpc_servers;
    std::vector<llama_split_mode> split_mode;
    std::vector<int> main_gpu;
    std::vector<bool> no_kv_offload;
@@ -202,6 +203,7 @@ static const cmd_params cmd_params_defaults = {
/* type_v */ {GGML_TYPE_F16},
/* n_threads */ {cpu_get_num_math()},
/* n_gpu_layers */ {99},
+ /* rpc_servers */ {""},
/* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
/* main_gpu */ {0},
/* no_kv_offload */ {false},
@@ -230,6 +232,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -ctv, --cache-type-v (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
printf(" -t, --threads (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
printf(" -ngl, --n-gpu-layers (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
+ printf(" -rpc, --rpc (default: %s)\n", join(cmd_params_defaults.rpc_servers, ",").c_str());
printf(" -sm, --split-mode (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
printf(" -mg, --main-gpu (default: %s)\n", join(cmd_params_defaults.main_gpu, ",").c_str());
printf(" -nkvo, --no-kv-offload <0|1> (default: %s)\n", join(cmd_params_defaults.no_kv_offload, ",").c_str());
@@ -384,6 +387,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
}
auto p = split(argv[i], split_delim);
params.n_gpu_layers.insert(params.n_gpu_layers.end(), p.begin(), p.end());
+ } else if (arg == "-rpc" || arg == "--rpc") {
+ if (++i >= argc) {
+ invalid_param = true;
+ break;
+ }
+ params.rpc_servers.push_back(argv[i]);
} else if (arg == "-sm" || arg == "--split-mode") {
if (++i >= argc) {
invalid_param = true;
@@ -519,6 +528,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
if (params.type_v.empty()) { params.type_v = cmd_params_defaults.type_v; }
if (params.n_gpu_layers.empty()) { params.n_gpu_layers = cmd_params_defaults.n_gpu_layers; }
+ if (params.rpc_servers.empty()) { params.rpc_servers = cmd_params_defaults.rpc_servers; }
if (params.split_mode.empty()) { params.split_mode = cmd_params_defaults.split_mode; }
if (params.main_gpu.empty()) { params.main_gpu = cmd_params_defaults.main_gpu; }
if (params.no_kv_offload.empty()){ params.no_kv_offload = cmd_params_defaults.no_kv_offload; }
@@ -541,6 +551,7 @@ struct cmd_params_instance {
ggml_type type_v;
int n_threads;
int n_gpu_layers;
+ std::string rpc_servers;
llama_split_mode split_mode;
int main_gpu;
bool no_kv_offload;
@@ -553,6 +564,9 @@ struct cmd_params_instance {
llama_model_params mparams = llama_model_default_params();
mparams.n_gpu_layers = n_gpu_layers;
+ if (!rpc_servers.empty()) {
+ mparams.rpc_servers = rpc_servers.c_str();
+ }
mparams.split_mode = split_mode;
mparams.main_gpu = main_gpu;
mparams.tensor_split = tensor_split.data();
@@ -564,6 +578,7 @@ struct cmd_params_instance {
bool equal_mparams(const cmd_params_instance & other) const {
return model == other.model &&
n_gpu_layers == other.n_gpu_layers &&
+ rpc_servers == other.rpc_servers &&
split_mode == other.split_mode &&
main_gpu == other.main_gpu &&
use_mmap == other.use_mmap &&
@@ -592,6 +607,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
// this ordering minimizes the number of times that each model needs to be reloaded
for (const auto & m : params.model)
for (const auto & nl : params.n_gpu_layers)
+ for (const auto & rpc : params.rpc_servers)
for (const auto & sm : params.split_mode)
for (const auto & mg : params.main_gpu)
for (const auto & ts : params.tensor_split)
@@ -618,6 +634,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .type_v = */ tv,
/* .n_threads = */ nt,
/* .n_gpu_layers = */ nl,
+ /* .rpc_servers = */ rpc,
/* .split_mode = */ sm,
/* .main_gpu = */ mg,
/* .no_kv_offload= */ nkvo,
@@ -643,6 +660,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .type_v = */ tv,
/* .n_threads = */ nt,
/* .n_gpu_layers = */ nl,
+ /* .rpc_servers = */ rpc,
/* .split_mode = */ sm,
/* .main_gpu = */ mg,
/* .no_kv_offload= */ nkvo,
@@ -668,6 +686,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .type_v = */ tv,
/* .n_threads = */ nt,
/* .n_gpu_layers = */ nl,
+ /* .rpc_servers = */ rpc,
/* .split_mode = */ sm,
/* .main_gpu = */ mg,
/* .no_kv_offload= */ nkvo,
@@ -692,6 +711,7 @@ struct test {
static const bool kompute;
static const bool metal;
static const bool sycl;
+ static const bool rpc;
static const bool gpu_blas;
static const bool blas;
static const std::string cpu_info;
@@ -790,6 +810,9 @@ struct test {
if (sycl) {
return GGML_SYCL_NAME;
}
+ if (rpc) {
+ return "RPC";
+ }
if (gpu_blas) {
return "GPU BLAS";
}
@@ -803,7 +826,7 @@ struct test {
    static const std::vector<std::string> & get_fields() {
        static const std::vector<std::string> fields = {
"build_commit", "build_number",
- "cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "gpu_blas", "blas",
+ "cuda", "opencl", "vulkan", "kompute", "metal", "sycl", "rpc", "gpu_blas", "blas",
"cpu_info", "gpu_info",
"model_filename", "model_type", "model_size", "model_n_params",
"n_batch", "n_ubatch",
@@ -859,7 +882,7 @@ struct test {
        std::vector<std::string> values = {
build_commit, std::to_string(build_number),
std::to_string(cuda), std::to_string(opencl), std::to_string(vulkan), std::to_string(vulkan),
- std::to_string(metal), std::to_string(sycl), std::to_string(gpu_blas), std::to_string(blas),
+ std::to_string(metal), std::to_string(sycl), std::to_string(rpc), std::to_string(gpu_blas), std::to_string(blas),
cpu_info, gpu_info,
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
std::to_string(n_batch), std::to_string(n_ubatch),
@@ -894,6 +917,7 @@ const bool test::metal = !!ggml_cpu_has_metal();
const bool test::gpu_blas = !!ggml_cpu_has_gpublas();
const bool test::blas = !!ggml_cpu_has_blas();
const bool test::sycl = !!ggml_cpu_has_sycl();
+const bool test::rpc = !!ggml_cpu_has_rpc();
const std::string test::cpu_info = get_cpu_info();
const std::string test::gpu_info = get_gpu_info();
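(A hedged example of the new `-rpc` flag; the host:port values are placeholders, and the comma-separated list is forwarded to `llama_model_params.rpc_servers`.)

```bash
# Benchmark a model against two RPC backend servers instead of local GPUs.
./llama-bench -m models/mymodel/ggml-model-Q4_K_M.gguf \
    -rpc "192.168.1.10:50052,192.168.1.11:50052"
```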
diff --git a/examples/llava/MobileVLM-README.md b/examples/llava/MobileVLM-README.md
index 413e433dd..74f021dec 100644
--- a/examples/llava/MobileVLM-README.md
+++ b/examples/llava/MobileVLM-README.md
@@ -54,10 +54,10 @@ python ./examples/llava/convert-image-encoder-to-gguf \
--projector-type ldpv2
```
-4. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
+4. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:
```sh
-python ./convert.py path/to/MobileVLM-1.7B
+python ./examples/convert-legacy-llama.py path/to/MobileVLM-1.7B
```
5. Use `quantize` to convert LLaMA part's DataType from `fp16` to `q4_k`
diff --git a/examples/llava/README.md b/examples/llava/README.md
index 4fb0cf381..8d1ae5270 100644
--- a/examples/llava/README.md
+++ b/examples/llava/README.md
@@ -50,10 +50,10 @@ python ./examples/llava/llava-surgery.py -m ../llava-v1.5-7b
python ./examples/llava/convert-image-encoder-to-gguf.py -m ../clip-vit-large-patch14-336 --llava-projector ../llava-v1.5-7b/llava.projector --output-dir ../llava-v1.5-7b
```
-5. Use `convert.py` to convert the LLaMA part of LLaVA to GGUF:
+5. Use `examples/convert-legacy-llama.py` to convert the LLaMA part of LLaVA to GGUF:
```sh
-python ./convert.py ../llava-v1.5-7b --skip-unknown
+python ./examples/convert-legacy-llama.py ../llava-v1.5-7b --skip-unknown
```
Now both the LLaMA part and the image encoder are in the `llava-v1.5-7b` directory.
@@ -92,7 +92,7 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m vit --llava-projecto
6) Then convert the model to gguf format:
```console
-python ./convert.py ../llava-v1.6-vicuna-7b/ --skip-unknown
+python ./examples/convert-legacy-llama.py ../llava-v1.6-vicuna-7b/ --skip-unknown
```
7) And finally we can run the llava-cli using the 1.6 model version:
diff --git a/examples/llava/requirements.txt b/examples/llava/requirements.txt
index f80f727a7..17cb4d5e5 100644
--- a/examples/llava/requirements.txt
+++ b/examples/llava/requirements.txt
@@ -1,3 +1,3 @@
--r ../../requirements/requirements-convert.txt
+-r ../../requirements/requirements-convert-legacy-llama.txt
pillow~=10.2.0
torch~=2.1.1
diff --git a/examples/make-ggml.py b/examples/make-ggml.py
deleted file mode 100755
index c73485ebf..000000000
--- a/examples/make-ggml.py
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/usr/bin/env python3
-"""
-This script converts Hugging Face Llama, StarCoder, Falcon, Baichuan, and GPT-NeoX models to GGUF and quantizes them.
-
-Usage:
-python make-ggml.py {model_dir_or_hf_repo_name} --model_type {model_type} [--outname {output_name} (Optional)] [--outdir {output_directory} (Optional)] [--quants {quant_types} (Optional)] [--keep_fp16 (Optional)]
-
-Arguments:
-- model: (Required) The directory of the downloaded Hugging Face model or the name of the Hugging Face model repository. If the model directory does not exist, it will be downloaded from the Hugging Face model hub.
-- --model_type: (Required) The type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox.
-- --outname: (Optional) The name of the output model. If not specified, the last part of the model directory path or the Hugging Face model repo name will be used.
-- --outdir: (Optional) The directory where the output model(s) will be stored. If not specified, '../models/{outname}' will be used.
-- --quants: (Optional) The types of quantization to apply. This should be a space-separated list. The default is 'Q4_K_M Q5_K_S'.
-- --keep_fp16: (Optional) If specified, the FP16 model will not be deleted after the quantized models are created.
-
-Old quant types (some base model types require these):
-- Q4_0: small, very high quality loss - legacy, prefer using Q3_K_M
-- Q4_1: small, substantial quality loss - legacy, prefer using Q3_K_L
-- Q5_0: medium, balanced quality - legacy, prefer using Q4_K_M
-- Q5_1: medium, low quality loss - legacy, prefer using Q5_K_M
-
-New quant types (recommended):
-- Q2_K: smallest, extreme quality loss - not recommended
-- Q3_K: alias for Q3_K_M
-- Q3_K_S: very small, very high quality loss
-- Q3_K_M: very small, very high quality loss
-- Q3_K_L: small, substantial quality loss
-- Q4_K: alias for Q4_K_M
-- Q4_K_S: small, significant quality loss
-- Q4_K_M: medium, balanced quality - recommended
-- Q5_K: alias for Q5_K_M
-- Q5_K_S: large, low quality loss - recommended
-- Q5_K_M: large, very low quality loss - recommended
-- Q6_K: very large, extremely low quality loss
-- Q8_0: very large, extremely low quality loss - not recommended
-- F16: extremely large, virtually no quality loss - not recommended
-- F32: absolutely huge, lossless - not recommended
-"""
-import subprocess
-subprocess.run(f"pip install huggingface-hub==0.16.4", shell=True, check=True)
-
-import argparse
-import os
-from huggingface_hub import snapshot_download
-
-def main(model, model_type, outname, outdir, quants, keep_fp16):
- if not os.path.isdir(model):
- print(f"Model not found at {model}. Downloading...")
- try:
- if outname is None:
- outname = model.split('/')[-1]
- model = snapshot_download(repo_id=model, cache_dir='../models/hf_cache')
- except Exception as e:
- raise Exception(f"Could not download the model: {e}")
-
- if outdir is None:
- outdir = f'../models/{outname}'
-
- if not os.path.isfile(f"{model}/config.json"):
- raise Exception(f"Could not find config.json in {model}")
-
- os.makedirs(outdir, exist_ok=True)
-
- print("Building llama.cpp")
- subprocess.run(f"cd .. && make quantize", shell=True, check=True)
-
- fp16 = f"{outdir}/{outname}.gguf.fp16.bin"
-
- print(f"Making unquantised GGUF at {fp16}")
- if not os.path.isfile(fp16):
- if model_type != "llama":
- subprocess.run(f"python3 ../convert-{model_type}-hf-to-gguf.py {model} 1 --outfile {fp16}", shell=True, check=True)
- else:
- subprocess.run(f"python3 ../convert.py {model} --outtype f16 --outfile {fp16}", shell=True, check=True)
- else:
- print(f"Unquantised GGML already exists at: {fp16}")
-
- print("Making quants")
- for type in quants:
- outfile = f"{outdir}/{outname}.gguf.{type}.bin"
- print(f"Making {type} : {outfile}")
- subprocess.run(f"../quantize {fp16} {outfile} {type}", shell=True, check=True)
-
- if not keep_fp16:
- os.remove(fp16)
-
-if __name__ == "__main__":
- parser = argparse.ArgumentParser(description='Convert/Quantize HF models to GGUF. If you have the HF model downloaded already, pass the path to the model dir. Otherwise, pass the Hugging Face model repo name. You need to be in the /examples folder for it to work.')
- parser.add_argument('model', help='Downloaded model dir or Hugging Face model repo name')
- parser.add_argument('--model_type', required=True, choices=['llama', 'starcoder', 'falcon', 'baichuan', 'gptneox'], help='Type of the model to be converted. Choose from llama, starcoder, falcon, baichuan, or gptneox.')
- parser.add_argument('--outname', default=None, help='Output model(s) name')
- parser.add_argument('--outdir', default=None, help='Output directory')
- parser.add_argument('--quants', nargs='*', default=["Q4_K_M", "Q5_K_S"], help='Quant types')
- parser.add_argument('--keep_fp16', action='store_true', help='Keep fp16 model', default=False)
-
- args = parser.parse_args()
-
- main(args.model, args.model_type, args.outname, args.outdir, args.quants, args.keep_fp16)
diff --git a/examples/server/public/index.js b/examples/server/public/index.js
index 695aec256..670960939 100644
--- a/examples/server/public/index.js
+++ b/examples/server/public/index.js
@@ -1 +1 @@
-const t=Symbol.for("preact-signals");function n(){if(r>1){r--;return}let t,n=!1;while(void 0!==i){let _=i;i=void 0;u++;while(void 0!==_){const i=_.o;_.o=void 0;_.f&=-3;if(!(8&_.f)&&h(_))try{_.c()}catch(e){if(!n){t=e;n=!0}}_=i}}u=0;r--;if(n)throw t}function e(t){if(r>0)return t();r++;try{return t()}finally{n()}}let _,i;function o(t){const n=_;_=void 0;try{return t()}finally{_=n}}let r=0,u=0,l=0;function s(t){if(void 0===_)return;let n=t.n;if(void 0===n||n.t!==_){n={i:0,S:t,p:_.s,n:void 0,t:_,e:void 0,x:void 0,r:n};if(void 0!==_.s)_.s.n=n;_.s=n;t.n=n;if(32&_.f)t.S(n);return n}else if(-1===n.i){n.i=0;if(void 0!==n.n){n.n.p=n.p;if(void 0!==n.p)n.p.n=n.n;n.p=_.s;n.n=void 0;_.s.n=n;_.s=n}return n}}function f(t){this.v=t;this.i=0;this.n=void 0;this.t=void 0}f.prototype.brand=t;f.prototype.h=function(){return!0};f.prototype.S=function(t){if(this.t!==t&&void 0===t.e){t.x=this.t;if(void 0!==this.t)this.t.e=t;this.t=t}};f.prototype.U=function(t){if(void 0!==this.t){const n=t.e,e=t.x;if(void 0!==n){n.x=e;t.e=void 0}if(void 0!==e){e.e=n;t.x=void 0}if(t===this.t)this.t=e}};f.prototype.subscribe=function(t){return k(()=>{const n=this.value,e=_;_=void 0;try{t(n)}finally{_=e}})};f.prototype.valueOf=function(){return this.value};f.prototype.toString=function(){return this.value+""};f.prototype.toJSON=function(){return this.value};f.prototype.peek=function(){const t=_;_=void 0;try{return this.value}finally{_=t}};Object.defineProperty(f.prototype,"value",{get(){const t=s(this);if(void 0!==t)t.i=this.i;return this.v},set(t){if(t!==this.v){if(u>100)throw new Error("Cycle detected");this.v=t;this.i++;l++;r++;try{for(let t=this.t;void 0!==t;t=t.x)t.t.N()}finally{n()}}}});function c(t){return new f(t)}function h(t){for(let n=t.s;void 0!==n;n=n.n)if(n.S.i!==n.i||!n.S.h()||n.S.i!==n.i)return!0;return!1}function a(t){for(let n=t.s;void 0!==n;n=n.n){const e=n.S.n;if(void 0!==e)n.r=e;n.S.n=n;n.i=-1;if(void 0===n.n){t.s=n;break}}}function p(t){let n,e=t.s;while(void 0!==e){const t=e.p;if(-1===e.i){e.S.U(e);if(void 0!==t)t.n=e.n;if(void 0!==e.n)e.n.p=t}else n=e;e.S.n=e.r;if(void 0!==e.r)e.r=void 0;e=t}t.s=n}function d(t){f.call(this,void 0);this.x=t;this.s=void 0;this.g=l-1;this.f=4}(d.prototype=new f).h=function(){this.f&=-3;if(1&this.f)return!1;if(32==(36&this.f))return!0;this.f&=-5;if(this.g===l)return!0;this.g=l;this.f|=1;if(this.i>0&&!h(this)){this.f&=-2;return!0}const t=_;try{a(this);_=this;const t=this.x();if(16&this.f||this.v!==t||0===this.i){this.v=t;this.f&=-17;this.i++}}catch(t){this.v=t;this.f|=16;this.i++}_=t;p(this);this.f&=-2;return!0};d.prototype.S=function(t){if(void 0===this.t){this.f|=36;for(let t=this.s;void 0!==t;t=t.n)t.S.S(t)}f.prototype.S.call(this,t)};d.prototype.U=function(t){if(void 0!==this.t){f.prototype.U.call(this,t);if(void 0===this.t){this.f&=-33;for(let t=this.s;void 0!==t;t=t.n)t.S.U(t)}}};d.prototype.N=function(){if(!(2&this.f)){this.f|=6;for(let t=this.t;void 0!==t;t=t.x)t.t.N()}};Object.defineProperty(d.prototype,"value",{get(){if(1&this.f)throw new Error("Cycle detected");const t=s(this);this.h();if(void 0!==t)t.i=this.i;if(16&this.f)throw this.v;return this.v}});function v(t){return new d(t)}function y(t){const e=t.u;t.u=void 0;if("function"==typeof e){r++;const i=_;_=void 0;try{e()}catch(n){t.f&=-2;t.f|=8;m(t);throw n}finally{_=i;n()}}}function m(t){for(let n=t.s;void 0!==n;n=n.n)n.S.U(n);t.x=void 0;t.s=void 0;y(t)}function g(t){if(_!==this)throw new Error("Out-of-order effect");p(this);_=t;this.f&=-2;if(8&this.f)m(this);n()}function b(t){this.x=t;this.u=void 0;this.s=void 
0;this.o=void 0;this.f=32}b.prototype.c=function(){const t=this.S();try{if(8&this.f)return;if(void 0===this.x)return;const n=this.x();if("function"==typeof n)this.u=n}finally{t()}};b.prototype.S=function(){if(1&this.f)throw new Error("Cycle detected");this.f|=1;this.f&=-9;y(this);a(this);r++;const t=_;_=this;return g.bind(this,t)};b.prototype.N=function(){if(!(2&this.f)){this.f|=2;this.o=i;i=this}};b.prototype.d=function(){this.f|=8;if(!(1&this.f))m(this)};function k(t){const n=new b(t);try{n.c()}catch(t){n.d();throw t}return n.d.bind(n)}var S,w,x,C,E,U,H,P,N,$,D,T,F={},V=[],A=/acit|ex(?:s|g|n|p|$)|rph|grid|ows|mnc|ntw|ine[ch]|zoo|^ord|itera/i,M=Array.isArray;function W(t,n){for(var e in n)t[e]=n[e];return t}function O(t){var n=t.parentNode;n&&n.removeChild(t)}function L(t,n,e){var _,i,o,r={};for(o in n)"key"==o?_=n[o]:"ref"==o?i=n[o]:r[o]=n[o];if(arguments.length>2&&(r.children=arguments.length>3?S.call(arguments,2):e),"function"==typeof t&&null!=t.defaultProps)for(o in t.defaultProps)void 0===r[o]&&(r[o]=t.defaultProps[o]);return R(t,r,_,i,null)}function R(t,n,e,_,i){var o={type:t,props:n,key:e,ref:_,__k:null,__:null,__b:0,__e:null,__d:void 0,__c:null,constructor:void 0,__v:null==i?++x:i,__i:-1,__u:0};return null==i&&null!=w.vnode&&w.vnode(o),o}function I(){return{current:null}}function j(t){return t.children}function q(t,n){this.props=t,this.context=n}function B(t,n){if(null==n)return t.__?B(t.__,t.__i+1):null;for(var e;nn&&E.sort(P));J.__r=0}function K(t,n,e,_,i,o,r,u,l,s,f){var c,h,a,p,d,v=_&&_.__k||V,y=n.length;for(e.__d=l,Q(e,n,v),l=e.__d,c=0;c0?R(i.type,i.props,i.key,i.ref?i.ref:null,i.__v):i)?(i.__=t,i.__b=t.__b+1,u=Z(i,e,r,f),i.__i=u,o=null,-1!==u&&(f--,(o=e[u])&&(o.__u|=131072)),null==o||null===o.__v?(-1==u&&c--,"function"!=typeof i.type&&(i.__u|=65536)):u!==r&&(u===r+1?c++:u>r?f>l-r?c+=u-r:c--:u(null!=l&&0==(131072&l.__u)?1:0))for(;r>=0||u=0){if((l=n[r])&&0==(131072&l.__u)&&i==l.key&&o===l.type)return r;r--}if(u2&&(u.children=arguments.length>3?S.call(arguments,2):e),R(t.type,u,_||t.key,i||t.ref,null)}function ht(t,n){var e={__c:n="__cC"+T++,__:t,Consumer:function(t,n){return t.children(n)},Provider:function(t){var e,_;return this.getChildContext||(e=[],(_={})[n]=this,this.getChildContext=function(){return _},this.shouldComponentUpdate=function(t){this.props.value!==t.value&&e.some((function(t){t.__e=!0,z(t)}))},this.sub=function(t){e.push(t);var n=t.componentWillUnmount;t.componentWillUnmount=function(){e.splice(e.indexOf(t),1),n&&n.call(t)}}),t.children}};return e.Provider.__=e.Consumer.contextType=e}S=V.slice,w={__e:function(t,n,e,_){for(var i,o,r;n=n.__;)if((i=n.__c)&&!i.__)try{if((o=i.constructor)&&null!=o.getDerivedStateFromError&&(i.setState(o.getDerivedStateFromError(t)),r=i.__d),null!=i.componentDidCatch&&(i.componentDidCatch(t,_||{}),r=i.__d),r)return i.__E=i}catch(n){t=n}throw t}},x=0,C=function(t){return null!=t&&null==t.constructor},q.prototype.setState=function(t,n){var e;e=null!=this.__s&&this.__s!==this.state?this.__s:this.__s=W({},this.state),"function"==typeof t&&(t=t(W({},e),this.props)),t&&W(e,t),null!=t&&this.__v&&(n&&this._sb.push(n),z(this))},q.prototype.forceUpdate=function(t){this.__v&&(this.__e=!0,t&&this.__h.push(t),z(this))},q.prototype.render=j,E=[],H="function"==typeof Promise?Promise.prototype.then.bind(Promise.resolve()):setTimeout,P=function(t,n){return t.__v.__b-n.__v.__b},J.__r=0,N=0,$=et(!1),D=et(!0),T=0;var at,pt,dt,vt,yt=0,mt=[],gt=[],bt=w,kt=bt.__b,St=bt.__r,wt=bt.diffed,xt=bt.__c,Ct=bt.unmount,Et=bt.__;function 
Ut(t,n){bt.__h&&bt.__h(pt,t,yt||n),yt=0;var e=pt.__H||(pt.__H={__:[],__h:[]});return t>=e.__.length&&e.__.push({__V:gt}),e.__[t]}function Ht(t){return yt=1,Pt(Gt,t)}function Pt(t,n,e){var _=Ut(at++,2);if(_.t=t,!_.__c&&(_.__=[e?e(n):Gt(void 0,n),function(t){var n=_.__N?_.__N[0]:_.__[0],e=_.t(n,t);n!==e&&(_.__N=[e,_.__[1]],_.__c.setState({}))}],_.__c=pt,!pt.u)){var i=function(t,n,e){if(!_.__c.__H)return!0;var i=_.__c.__H.__.filter((function(t){return!!t.__c}));if(i.every((function(t){return!t.__N})))return!o||o.call(this,t,n,e);var r=!1;return i.forEach((function(t){if(t.__N){var n=t.__[0];t.__=t.__N,t.__N=void 0,n!==t.__[0]&&(r=!0)}})),!(!r&&_.__c.props===t)&&(!o||o.call(this,t,n,e))};pt.u=!0;var o=pt.shouldComponentUpdate,r=pt.componentWillUpdate;pt.componentWillUpdate=function(t,n,e){if(this.__e){var _=o;o=void 0,i(t,n,e),o=_}r&&r.call(this,t,n,e)},pt.shouldComponentUpdate=i}return _.__N||_.__}function Nt(t,n){var e=Ut(at++,3);!bt.__s&&Bt(e.__H,n)&&(e.__=t,e.i=n,pt.__H.__h.push(e))}function $t(t,n){var e=Ut(at++,4);!bt.__s&&Bt(e.__H,n)&&(e.__=t,e.i=n,pt.__h.push(e))}function Dt(t){return yt=5,Ft((function(){return{current:t}}),[])}function Tt(t,n,e){yt=6,$t((function(){return"function"==typeof t?(t(n()),function(){return t(null)}):t?(t.current=n(),function(){return t.current=null}):void 0}),null==e?e:e.concat(t))}function Ft(t,n){var e=Ut(at++,7);return Bt(e.__H,n)?(e.__V=t(),e.i=n,e.__h=t,e.__V):e.__}function Vt(t,n){return yt=8,Ft((function(){return t}),n)}function At(t){var n=pt.context[t.__c],e=Ut(at++,9);return e.c=t,n?(null==e.__&&(e.__=!0,n.sub(pt)),n.props.value):t.__}function Mt(t,n){bt.useDebugValue&&bt.useDebugValue(n?n(t):t)}function Wt(t){var n=Ut(at++,10),e=Ht();return n.__=t,pt.componentDidCatch||(pt.componentDidCatch=function(t,_){n.__&&n.__(t,_),e[1](t)}),[e[0],function(){e[1](void 0)}]}function Ot(){var t=Ut(at++,11);if(!t.__){for(var n=pt.__v;null!==n&&!n.__m&&null!==n.__;)n=n.__;var e=n.__m||(n.__m=[0,0]);t.__="P"+e[0]+"-"+e[1]++}return t.__}function Lt(){for(var t;t=mt.shift();)if(t.__P&&t.__H)try{t.__H.__h.forEach(jt),t.__H.__h.forEach(qt),t.__H.__h=[]}catch(n){t.__H.__h=[],bt.__e(n,t.__v)}}bt.__b=function(t){pt=null,kt&&kt(t)},bt.__=function(t,n){t&&n.__k&&n.__k.__m&&(t.__m=n.__k.__m),Et&&Et(t,n)},bt.__r=function(t){St&&St(t),at=0;var n=(pt=t.__c).__H;n&&(dt===pt?(n.__h=[],pt.__h=[],n.__.forEach((function(t){t.__N&&(t.__=t.__N),t.__V=gt,t.__N=t.i=void 0}))):(n.__h.forEach(jt),n.__h.forEach(qt),n.__h=[],at=0)),dt=pt},bt.diffed=function(t){wt&&wt(t);var n=t.__c;n&&n.__H&&(n.__H.__h.length&&(1!==mt.push(n)&&vt===bt.requestAnimationFrame||((vt=bt.requestAnimationFrame)||It)(Lt)),n.__H.__.forEach((function(t){t.i&&(t.__H=t.i),t.__V!==gt&&(t.__=t.__V),t.i=void 0,t.__V=gt}))),dt=pt=null},bt.__c=function(t,n){n.some((function(t){try{t.__h.forEach(jt),t.__h=t.__h.filter((function(t){return!t.__||qt(t)}))}catch(r){n.some((function(t){t.__h&&(t.__h=[])})),n=[],bt.__e(r,t.__v)}})),xt&&xt(t,n)},bt.unmount=function(t){Ct&&Ct(t);var n,e=t.__c;e&&e.__H&&(e.__H.__.forEach((function(t){try{jt(t)}catch(t){n=t}})),e.__H=void 0,n&&bt.__e(n,e.__v))};var Rt="function"==typeof requestAnimationFrame;function It(t){var n,e=function(){clearTimeout(_),Rt&&cancelAnimationFrame(n),setTimeout(t)},_=setTimeout(e,100);Rt&&(n=requestAnimationFrame(e))}function jt(t){var n=pt,e=t.__c;"function"==typeof e&&(t.__c=void 0,e()),pt=n}function qt(t){var n=pt;t.__c=t.__(),pt=n}function Bt(t,n){return!t||t.length!==n.length||n.some((function(n,e){return n!==t[e]}))}function Gt(t,n){return"function"==typeof 
n?n(t):n}function zt(t,n){w[t]=n.bind(null,w[t]||(()=>{}))}let Jt,Kt;function Qt(t){if(Kt)Kt();Kt=t&&t.S()}function Xt({data:t}){const n=Zt(t);n.value=t;const e=Ft(()=>{let t=this.__v;while(t=t.__)if(t.__c){t.__c.__$f|=4;break}this.__$u.c=()=>{var t;if(!C(e.peek())&&3===(null==(t=this.base)?void 0:t.nodeType))this.base.data=e.peek();else{this.__$f|=1;this.setState({})}};return v(()=>{let t=n.value.value;return 0===t?0:!0===t?"":t||""})},[]);return e.value}Xt.displayName="_st";Object.defineProperties(f.prototype,{constructor:{configurable:!0,value:void 0},type:{configurable:!0,value:Xt},props:{configurable:!0,get(){return{data:this}}},__b:{configurable:!0,value:1}});zt("__b",(t,n)=>{if("string"==typeof n.type){let t,e=n.props;for(let _ in e){if("children"===_)continue;let i=e[_];if(i instanceof f){if(!t)n.__np=t={};t[_]=i;e[_]=i.peek()}}}t(n)});zt("__r",(t,n)=>{Qt();let e,_=n.__c;if(_){_.__$f&=-2;e=_.__$u;if(void 0===e)_.__$u=e=function(t){let n;k((function(){n=this}));n.c=()=>{_.__$f|=1;_.setState({})};return n}()}Jt=_;Qt(e);t(n)});zt("__e",(t,n,e,_)=>{Qt();Jt=void 0;t(n,e,_)});zt("diffed",(t,n)=>{Qt();Jt=void 0;let e;if("string"==typeof n.type&&(e=n.__e)){let t=n.__np,_=n.props;if(t){let n=e.U;if(n)for(let e in n){let _=n[e];if(void 0!==_&&!(e in t)){_.d();n[e]=void 0}}else{n={};e.U=n}for(let i in t){let o=n[i],r=t[i];if(void 0===o){o=Yt(e,i,r,_);n[i]=o}else o.o(r,_)}}}t(n)});function Yt(t,n,e,_){const i=n in t&&void 0===t.ownerSVGElement,o=c(e);return{o:(t,n)=>{o.value=t;_=n},d:k(()=>{const e=o.value.value;if(_[n]!==e){_[n]=e;if(i)t[n]=e;else if(e)t.setAttribute(n,e);else t.removeAttribute(n)}})}}zt("unmount",(t,n)=>{if("string"==typeof n.type){let t=n.__e;if(t){const n=t.U;if(n){t.U=void 0;for(let t in n){let e=n[t];if(e)e.d()}}}}else{let t=n.__c;if(t){const n=t.__$u;if(n){t.__$u=void 0;n.d()}}}t(n)});zt("__h",(t,n,e,_)=>{if(_<3||9===_)n.__$f|=2;t(n,e,_)});q.prototype.shouldComponentUpdate=function(t,n){const e=this.__$u;if(!(e&&void 0!==e.s||4&this.__$f))return!0;if(3&this.__$f)return!0;for(let _ in n)return!0;for(let _ in t)if("__source"!==_&&t[_]!==this.props[_])return!0;for(let _ in this.props)if(!(_ in t))return!0;return!1};function Zt(t){return Ft(()=>c(t),[])}function tn(t){const n=Dt(t);n.current=t;Jt.__$f|=4;return Ft(()=>v(()=>n.current()),[])}function nn(t){const n=Dt(t);n.current=t;Nt(()=>k(()=>n.current()),[])}var en=function(t,n,e,_){var i;n[0]=0;for(var o=1;o=5&&((i||!t&&5===_)&&(r.push(_,0,i,e),_=6),t&&(r.push(_,t,0,e),_=6)),i=""},l=0;l"===n?(_=1,i=""):i=n+i[0]:o?n===o?o="":i+=n:'"'===n||"'"===n?o=n:">"===n?(u(),_=1):_&&("="===n?(_=5,e=i,i=""):"/"===n&&(_<5||">"===t[l][s+1])?(u(),3===_&&(r=r[0]),_=r,(r=r[0]).push(2,0,_),_=0):" "===n||"\t"===n||"\n"===n||"\r"===n?(u(),_=2):i+=n),3===_&&"!--"===i&&(_=4,r=r[0])}return u(),r}(t)),n),arguments,[])).length>1?n:n[0]}var rn=on.bind(L);export{q as Component,j as Fragment,f as Signal,e as batch,ct as cloneElement,v as computed,ht as createContext,L as createElement,I as createRef,k as effect,L as h,rn as html,ft as hydrate,C as isValidElement,w as options,st as render,c as signal,Y as toChildArray,o as untracked,Vt as useCallback,tn as useComputed,At as useContext,Mt as useDebugValue,Nt as useEffect,Wt as useErrorBoundary,Ot as useId,Tt as useImperativeHandle,$t as useLayoutEffect,Ft as useMemo,Pt as useReducer,Dt as useRef,Zt as useSignal,nn as useSignalEffect,Ht as useState};
+const t=Symbol.for("preact-signals");function n(){if(r>1){r--;return}let t,n=!1;while(void 0!==i){let _=i;i=void 0;u++;while(void 0!==_){const i=_.o;_.o=void 0;_.f&=-3;if(!(8&_.f)&&h(_))try{_.c()}catch(e){if(!n){t=e;n=!0}}_=i}}u=0;r--;if(n)throw t}function e(t){if(r>0)return t();r++;try{return t()}finally{n()}}let _,i;function o(t){const n=_;_=void 0;try{return t()}finally{_=n}}let r=0,u=0,l=0;function s(t){if(void 0===_)return;let n=t.n;if(void 0===n||n.t!==_){n={i:0,S:t,p:_.s,n:void 0,t:_,e:void 0,x:void 0,r:n};if(void 0!==_.s)_.s.n=n;_.s=n;t.n=n;if(32&_.f)t.S(n);return n}else if(-1===n.i){n.i=0;if(void 0!==n.n){n.n.p=n.p;if(void 0!==n.p)n.p.n=n.n;n.p=_.s;n.n=void 0;_.s.n=n;_.s=n}return n}}function f(t){this.v=t;this.i=0;this.n=void 0;this.t=void 0}f.prototype.brand=t;f.prototype.h=function(){return!0};f.prototype.S=function(t){if(this.t!==t&&void 0===t.e){t.x=this.t;if(void 0!==this.t)this.t.e=t;this.t=t}};f.prototype.U=function(t){if(void 0!==this.t){const n=t.e,e=t.x;if(void 0!==n){n.x=e;t.e=void 0}if(void 0!==e){e.e=n;t.x=void 0}if(t===this.t)this.t=e}};f.prototype.subscribe=function(t){return k(()=>{const n=this.value,e=_;_=void 0;try{t(n)}finally{_=e}})};f.prototype.valueOf=function(){return this.value};f.prototype.toString=function(){return this.value+""};f.prototype.toJSON=function(){return this.value};f.prototype.peek=function(){const t=_;_=void 0;try{return this.value}finally{_=t}};Object.defineProperty(f.prototype,"value",{get(){const t=s(this);if(void 0!==t)t.i=this.i;return this.v},set(t){if(t!==this.v){if(u>100)throw new Error("Cycle detected");this.v=t;this.i++;l++;r++;try{for(let t=this.t;void 0!==t;t=t.x)t.t.N()}finally{n()}}}});function c(t){return new f(t)}function h(t){for(let n=t.s;void 0!==n;n=n.n)if(n.S.i!==n.i||!n.S.h()||n.S.i!==n.i)return!0;return!1}function a(t){for(let n=t.s;void 0!==n;n=n.n){const e=n.S.n;if(void 0!==e)n.r=e;n.S.n=n;n.i=-1;if(void 0===n.n){t.s=n;break}}}function p(t){let n,e=t.s;while(void 0!==e){const t=e.p;if(-1===e.i){e.S.U(e);if(void 0!==t)t.n=e.n;if(void 0!==e.n)e.n.p=t}else n=e;e.S.n=e.r;if(void 0!==e.r)e.r=void 0;e=t}t.s=n}function d(t){f.call(this,void 0);this.x=t;this.s=void 0;this.g=l-1;this.f=4}(d.prototype=new f).h=function(){this.f&=-3;if(1&this.f)return!1;if(32==(36&this.f))return!0;this.f&=-5;if(this.g===l)return!0;this.g=l;this.f|=1;if(this.i>0&&!h(this)){this.f&=-2;return!0}const t=_;try{a(this);_=this;const t=this.x();if(16&this.f||this.v!==t||0===this.i){this.v=t;this.f&=-17;this.i++}}catch(t){this.v=t;this.f|=16;this.i++}_=t;p(this);this.f&=-2;return!0};d.prototype.S=function(t){if(void 0===this.t){this.f|=36;for(let t=this.s;void 0!==t;t=t.n)t.S.S(t)}f.prototype.S.call(this,t)};d.prototype.U=function(t){if(void 0!==this.t){f.prototype.U.call(this,t);if(void 0===this.t){this.f&=-33;for(let t=this.s;void 0!==t;t=t.n)t.S.U(t)}}};d.prototype.N=function(){if(!(2&this.f)){this.f|=6;for(let t=this.t;void 0!==t;t=t.x)t.t.N()}};Object.defineProperty(d.prototype,"value",{get(){if(1&this.f)throw new Error("Cycle detected");const t=s(this);this.h();if(void 0!==t)t.i=this.i;if(16&this.f)throw this.v;return this.v}});function v(t){return new d(t)}function y(t){const e=t.u;t.u=void 0;if("function"==typeof e){r++;const i=_;_=void 0;try{e()}catch(n){t.f&=-2;t.f|=8;m(t);throw n}finally{_=i;n()}}}function m(t){for(let n=t.s;void 0!==n;n=n.n)n.S.U(n);t.x=void 0;t.s=void 0;y(t)}function g(t){if(_!==this)throw new Error("Out-of-order effect");p(this);_=t;this.f&=-2;if(8&this.f)m(this);n()}function b(t){this.x=t;this.u=void 0;this.s=void 
0;this.o=void 0;this.f=32}b.prototype.c=function(){const t=this.S();try{if(8&this.f)return;if(void 0===this.x)return;const n=this.x();if("function"==typeof n)this.u=n}finally{t()}};b.prototype.S=function(){if(1&this.f)throw new Error("Cycle detected");this.f|=1;this.f&=-9;y(this);a(this);r++;const t=_;_=this;return g.bind(this,t)};b.prototype.N=function(){if(!(2&this.f)){this.f|=2;this.o=i;i=this}};b.prototype.d=function(){this.f|=8;if(!(1&this.f))m(this)};function k(t){const n=new b(t);try{n.c()}catch(t){n.d();throw t}return n.d.bind(n)}var w,S,x,C,U,E,H,P,N,$,D,T,M={},F=[],A=/acit|ex(?:s|g|n|p|$)|rph|grid|ows|mnc|ntw|ine[ch]|zoo|^ord|itera/i,V=Array.isArray;function W(t,n){for(var e in n)t[e]=n[e];return t}function L(t){var n=t.parentNode;n&&n.removeChild(t)}function O(t,n,e){var _,i,o,r={};for(o in n)"key"==o?_=n[o]:"ref"==o?i=n[o]:r[o]=n[o];if(arguments.length>2&&(r.children=arguments.length>3?w.call(arguments,2):e),"function"==typeof t&&null!=t.defaultProps)for(o in t.defaultProps)void 0===r[o]&&(r[o]=t.defaultProps[o]);return R(t,r,_,i,null)}function R(t,n,e,_,i){var o={type:t,props:n,key:e,ref:_,__k:null,__:null,__b:0,__e:null,__d:void 0,__c:null,constructor:void 0,__v:null==i?++x:i,__i:-1,__u:0};return null==i&&null!=S.vnode&&S.vnode(o),o}function I(){return{current:null}}function j(t){return t.children}function q(t,n){this.props=t,this.context=n}function B(t,n){if(null==n)return t.__?B(t.__,t.__i+1):null;for(var e;nn&&U.sort(P));J.__r=0}function K(t,n,e,_,i,o,r,u,l,s,f){var c,h,a,p,d,v=_&&_.__k||F,y=n.length;for(e.__d=l,Q(e,n,v),l=e.__d,c=0;c0?R(i.type,i.props,i.key,i.ref?i.ref:null,i.__v):i)?(i.__=t,i.__b=t.__b+1,u=Z(i,e,r,f),i.__i=u,o=null,-1!==u&&(f--,(o=e[u])&&(o.__u|=131072)),null==o||null===o.__v?(-1==u&&c--,"function"!=typeof i.type&&(i.__u|=65536)):u!==r&&(u===r+1?c++:u>r?f>l-r?c+=u-r:c--:u(null!=l&&0==(131072&l.__u)?1:0))for(;r>=0||u=0){if((l=n[r])&&0==(131072&l.__u)&&i==l.key&&o===l.type)return r;r--}if(u2&&(u.children=arguments.length>3?w.call(arguments,2):e),R(t.type,u,_||t.key,i||t.ref,null)}function ht(t,n){var e={__c:n="__cC"+T++,__:t,Consumer:function(t,n){return t.children(n)},Provider:function(t){var e,_;return this.getChildContext||(e=[],(_={})[n]=this,this.getChildContext=function(){return _},this.shouldComponentUpdate=function(t){this.props.value!==t.value&&e.some((function(t){t.__e=!0,G(t)}))},this.sub=function(t){e.push(t);var n=t.componentWillUnmount;t.componentWillUnmount=function(){e.splice(e.indexOf(t),1),n&&n.call(t)}}),t.children}};return e.Provider.__=e.Consumer.contextType=e}w=F.slice,S={__e:function(t,n,e,_){for(var i,o,r;n=n.__;)if((i=n.__c)&&!i.__)try{if((o=i.constructor)&&null!=o.getDerivedStateFromError&&(i.setState(o.getDerivedStateFromError(t)),r=i.__d),null!=i.componentDidCatch&&(i.componentDidCatch(t,_||{}),r=i.__d),r)return i.__E=i}catch(n){t=n}throw t}},x=0,C=function(t){return null!=t&&null==t.constructor},q.prototype.setState=function(t,n){var e;e=null!=this.__s&&this.__s!==this.state?this.__s:this.__s=W({},this.state),"function"==typeof t&&(t=t(W({},e),this.props)),t&&W(e,t),null!=t&&this.__v&&(n&&this._sb.push(n),G(this))},q.prototype.forceUpdate=function(t){this.__v&&(this.__e=!0,t&&this.__h.push(t),G(this))},q.prototype.render=j,U=[],H="function"==typeof Promise?Promise.prototype.then.bind(Promise.resolve()):setTimeout,P=function(t,n){return t.__v.__b-n.__v.__b},J.__r=0,N=0,$=et(!1),D=et(!0),T=0;var at,pt,dt,vt,yt=0,mt=[],gt=[],bt=S,kt=bt.__b,wt=bt.__r,St=bt.diffed,xt=bt.__c,Ct=bt.unmount,Ut=bt.__;function 
Et(t,n){bt.__h&&bt.__h(pt,t,yt||n),yt=0;var e=pt.__H||(pt.__H={__:[],__h:[]});return t>=e.__.length&&e.__.push({__V:gt}),e.__[t]}function Ht(t){return yt=1,Pt(zt,t)}function Pt(t,n,e){var _=Et(at++,2);if(_.t=t,!_.__c&&(_.__=[e?e(n):zt(void 0,n),function(t){var n=_.__N?_.__N[0]:_.__[0],e=_.t(n,t);n!==e&&(_.__N=[e,_.__[1]],_.__c.setState({}))}],_.__c=pt,!pt.u)){var i=function(t,n,e){if(!_.__c.__H)return!0;var i=_.__c.__H.__.filter((function(t){return!!t.__c}));if(i.every((function(t){return!t.__N})))return!o||o.call(this,t,n,e);var r=!1;return i.forEach((function(t){if(t.__N){var n=t.__[0];t.__=t.__N,t.__N=void 0,n!==t.__[0]&&(r=!0)}})),!(!r&&_.__c.props===t)&&(!o||o.call(this,t,n,e))};pt.u=!0;var o=pt.shouldComponentUpdate,r=pt.componentWillUpdate;pt.componentWillUpdate=function(t,n,e){if(this.__e){var _=o;o=void 0,i(t,n,e),o=_}r&&r.call(this,t,n,e)},pt.shouldComponentUpdate=i}return _.__N||_.__}function Nt(t,n){var e=Et(at++,3);!bt.__s&&Bt(e.__H,n)&&(e.__=t,e.i=n,pt.__H.__h.push(e))}function $t(t,n){var e=Et(at++,4);!bt.__s&&Bt(e.__H,n)&&(e.__=t,e.i=n,pt.__h.push(e))}function Dt(t){return yt=5,Mt((function(){return{current:t}}),[])}function Tt(t,n,e){yt=6,$t((function(){return"function"==typeof t?(t(n()),function(){return t(null)}):t?(t.current=n(),function(){return t.current=null}):void 0}),null==e?e:e.concat(t))}function Mt(t,n){var e=Et(at++,7);return Bt(e.__H,n)?(e.__V=t(),e.i=n,e.__h=t,e.__V):e.__}function Ft(t,n){return yt=8,Mt((function(){return t}),n)}function At(t){var n=pt.context[t.__c],e=Et(at++,9);return e.c=t,n?(null==e.__&&(e.__=!0,n.sub(pt)),n.props.value):t.__}function Vt(t,n){bt.useDebugValue&&bt.useDebugValue(n?n(t):t)}function Wt(t){var n=Et(at++,10),e=Ht();return n.__=t,pt.componentDidCatch||(pt.componentDidCatch=function(t,_){n.__&&n.__(t,_),e[1](t)}),[e[0],function(){e[1](void 0)}]}function Lt(){var t=Et(at++,11);if(!t.__){for(var n=pt.__v;null!==n&&!n.__m&&null!==n.__;)n=n.__;var e=n.__m||(n.__m=[0,0]);t.__="P"+e[0]+"-"+e[1]++}return t.__}function Ot(){for(var t;t=mt.shift();)if(t.__P&&t.__H)try{t.__H.__h.forEach(jt),t.__H.__h.forEach(qt),t.__H.__h=[]}catch(n){t.__H.__h=[],bt.__e(n,t.__v)}}bt.__b=function(t){pt=null,kt&&kt(t)},bt.__=function(t,n){t&&n.__k&&n.__k.__m&&(t.__m=n.__k.__m),Ut&&Ut(t,n)},bt.__r=function(t){wt&&wt(t),at=0;var n=(pt=t.__c).__H;n&&(dt===pt?(n.__h=[],pt.__h=[],n.__.forEach((function(t){t.__N&&(t.__=t.__N),t.__V=gt,t.__N=t.i=void 0}))):(n.__h.forEach(jt),n.__h.forEach(qt),n.__h=[],at=0)),dt=pt},bt.diffed=function(t){St&&St(t);var n=t.__c;n&&n.__H&&(n.__H.__h.length&&(1!==mt.push(n)&&vt===bt.requestAnimationFrame||((vt=bt.requestAnimationFrame)||It)(Ot)),n.__H.__.forEach((function(t){t.i&&(t.__H=t.i),t.__V!==gt&&(t.__=t.__V),t.i=void 0,t.__V=gt}))),dt=pt=null},bt.__c=function(t,n){n.some((function(t){try{t.__h.forEach(jt),t.__h=t.__h.filter((function(t){return!t.__||qt(t)}))}catch(r){n.some((function(t){t.__h&&(t.__h=[])})),n=[],bt.__e(r,t.__v)}})),xt&&xt(t,n)},bt.unmount=function(t){Ct&&Ct(t);var n,e=t.__c;e&&e.__H&&(e.__H.__.forEach((function(t){try{jt(t)}catch(t){n=t}})),e.__H=void 0,n&&bt.__e(n,e.__v))};var Rt="function"==typeof requestAnimationFrame;function It(t){var n,e=function(){clearTimeout(_),Rt&&cancelAnimationFrame(n),setTimeout(t)},_=setTimeout(e,100);Rt&&(n=requestAnimationFrame(e))}function jt(t){var n=pt,e=t.__c;"function"==typeof e&&(t.__c=void 0,e()),pt=n}function qt(t){var n=pt;t.__c=t.__(),pt=n}function Bt(t,n){return!t||t.length!==n.length||n.some((function(n,e){return n!==t[e]}))}function zt(t,n){return"function"==typeof 
n?n(t):n}function Gt(t,n){S[t]=n.bind(null,S[t]||(()=>{}))}let Jt,Kt;function Qt(t){if(Kt)Kt();Kt=t&&t.S()}function Xt({data:t}){const n=Zt(t);n.value=t;const e=Mt(()=>{let t=this.__v;while(t=t.__)if(t.__c){t.__c.__$f|=4;break}this.__$u.c=()=>{var t;if(!C(e.peek())&&3===(null==(t=this.base)?void 0:t.nodeType))this.base.data=e.peek();else{this.__$f|=1;this.setState({})}};return v(()=>{let t=n.value.value;return 0===t?0:!0===t?"":t||""})},[]);return e.value}Xt.displayName="_st";Object.defineProperties(f.prototype,{constructor:{configurable:!0,value:void 0},type:{configurable:!0,value:Xt},props:{configurable:!0,get(){return{data:this}}},__b:{configurable:!0,value:1}});Gt("__b",(t,n)=>{if("string"==typeof n.type){let t,e=n.props;for(let _ in e){if("children"===_)continue;let i=e[_];if(i instanceof f){if(!t)n.__np=t={};t[_]=i;e[_]=i.peek()}}}t(n)});Gt("__r",(t,n)=>{Qt();let e,_=n.__c;if(_){_.__$f&=-2;e=_.__$u;if(void 0===e)_.__$u=e=function(t){let n;k((function(){n=this}));n.c=()=>{_.__$f|=1;_.setState({})};return n}()}Jt=_;Qt(e);t(n)});Gt("__e",(t,n,e,_)=>{Qt();Jt=void 0;t(n,e,_)});Gt("diffed",(t,n)=>{Qt();Jt=void 0;let e;if("string"==typeof n.type&&(e=n.__e)){let t=n.__np,_=n.props;if(t){let n=e.U;if(n)for(let e in n){let _=n[e];if(void 0!==_&&!(e in t)){_.d();n[e]=void 0}}else{n={};e.U=n}for(let i in t){let o=n[i],r=t[i];if(void 0===o){o=Yt(e,i,r,_);n[i]=o}else o.o(r,_)}}}t(n)});function Yt(t,n,e,_){const i=n in t&&void 0===t.ownerSVGElement,o=c(e);return{o:(t,n)=>{o.value=t;_=n},d:k(()=>{const e=o.value.value;if(_[n]!==e){_[n]=e;if(i)t[n]=e;else if(e)t.setAttribute(n,e);else t.removeAttribute(n)}})}}Gt("unmount",(t,n)=>{if("string"==typeof n.type){let t=n.__e;if(t){const n=t.U;if(n){t.U=void 0;for(let t in n){let e=n[t];if(e)e.d()}}}}else{let t=n.__c;if(t){const n=t.__$u;if(n){t.__$u=void 0;n.d()}}}t(n)});Gt("__h",(t,n,e,_)=>{if(_<3||9===_)n.__$f|=2;t(n,e,_)});q.prototype.shouldComponentUpdate=function(t,n){const e=this.__$u;if(!(e&&void 0!==e.s||4&this.__$f))return!0;if(3&this.__$f)return!0;for(let _ in n)return!0;for(let _ in t)if("__source"!==_&&t[_]!==this.props[_])return!0;for(let _ in this.props)if(!(_ in t))return!0;return!1};function Zt(t){return Mt(()=>c(t),[])}function tn(t){const n=Dt(t);n.current=t;Jt.__$f|=4;return Mt(()=>v(()=>n.current()),[])}function nn(t){const n=Dt(t);n.current=t;Nt(()=>k(()=>n.current()),[])}var en=function(t,n,e,_){var i;n[0]=0;for(var o=1;o=5&&((i||!t&&5===_)&&(r.push(_,0,i,e),_=6),t&&(r.push(_,t,0,e),_=6)),i=""},l=0;l"===n?(_=1,i=""):i=n+i[0]:o?n===o?o="":i+=n:'"'===n||"'"===n?o=n:">"===n?(u(),_=1):_&&("="===n?(_=5,e=i,i=""):"/"===n&&(_<5||">"===t[l][s+1])?(u(),3===_&&(r=r[0]),_=r,(r=r[0]).push(2,0,_),_=0):" "===n||"\t"===n||"\n"===n||"\r"===n?(u(),_=2):i+=n),3===_&&"!--"===i&&(_=4,r=r[0])}return u(),r}(t)),n),arguments,[])).length>1?n:n[0]}var rn=on.bind(O);export{q as Component,j as Fragment,f as Signal,e as batch,ct as cloneElement,v as computed,ht as createContext,O as createElement,I as createRef,k as effect,O as h,rn as html,ft as hydrate,C as isValidElement,S as options,st as render,c as signal,Y as toChildArray,o as untracked,Ft as useCallback,tn as useComputed,At as useContext,Vt as useDebugValue,Nt as useEffect,Wt as useErrorBoundary,Lt as useId,Tt as useImperativeHandle,$t as useLayoutEffect,Mt as useMemo,Pt as useReducer,Dt as useRef,Zt as useSignal,nn as useSignalEffect,Ht as useState};
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index d0a754ee1..daaa0cd6a 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -1870,7 +1870,7 @@ static void ggml_cuda_mul_mat_batched_cublas(ggml_backend_cuda_context & ctx, co
}
}
#else
- if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) {
+ if (r2 == 1 && r3 == 1 && ggml_is_contiguous_2(src0) && ggml_is_contiguous_2(src1)) {
// there is no broadcast and src0, src1 are contiguous across dims 2, 3
// use cublasGemmStridedBatchedEx
CUBLAS_CHECK(
@@ -2886,7 +2886,9 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_OP_CONT:
case GGML_OP_DIAG_MASK_INF:
case GGML_OP_SOFT_MAX:
+ return true;
case GGML_OP_ROPE:
+ return ggml_is_contiguous(op->src[0]);
case GGML_OP_IM2COL:
case GGML_OP_POOL_2D:
case GGML_OP_SUM_ROWS:
@@ -2903,10 +2905,14 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128;
#else
- if (op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128) {
+ if (op->src[0]->ne[0] == 128) {
return true;
}
- return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA;
+ if (op->src[0]->ne[0] == 64 && op->src[1]->type == GGML_TYPE_F16) {
+ return true;
+ }
+ return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA &&
+ op->src[1]->type == GGML_TYPE_F16 && op->src[2]->type == GGML_TYPE_F16;
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
default:
return false;
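
Note on the contiguity change above: `ggml_is_contiguous_2(src0)` captures the condition the removed check spelled out by hand, namely that dim-2 slices are packed back to back inside dim 3, which is what cublasGemmStridedBatchedEx needs. A minimal sketch of that core condition, assuming only the standard ggml `ne`/`nb` fields (the real ggml helper additionally handles block-quantized types and singleton dimensions):

```cpp
// Hedged sketch of the condition the removed stride check expressed by hand.
// Assumes the usual ggml layout: ne[i] = elements along dim i, nb[i] = stride in bytes.
struct tensor_shape {
    long long          ne[4];
    unsigned long long nb[4];
};

// True when dim-2 slices are packed back to back inside dim 3, the precondition
// for using cublasGemmStridedBatchedEx without per-matrix pointer tables.
static bool contiguous_across_dims_2_3(const tensor_shape & t) {
    return t.nb[2] * (unsigned long long) t.ne[2] == t.nb[3];
}
```
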
diff --git a/ggml-cuda/concat.cu b/ggml-cuda/concat.cu
index fb9dee8f8..dac10ec36 100644
--- a/ggml-cuda/concat.cu
+++ b/ggml-cuda/concat.cu
@@ -1,5 +1,6 @@
#include "concat.cuh"
+// contiguous kernels
static __global__ void concat_f32_dim0(const float * x, const float * y, float * dst, const int ne0, const int ne00) {
int nidx = threadIdx.x + blockIdx.x * blockDim.x;
if (nidx >= ne0) {
@@ -92,39 +93,104 @@ static void concat_f32_cuda(const float * x, const float * y, float * dst, int n
concat_f32_dim2<<<gridDim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(x, y, dst, ne0, ne02);
}
+// non-contiguous kernel (slow)
+static __global__ void concat_f32_non_cont(
+ const char * src0,
+ const char * src1,
+ char * dst,
+ int64_t ne00,
+ int64_t ne01,
+ int64_t ne02,
+ int64_t ne03,
+ uint64_t nb00,
+ uint64_t nb01,
+ uint64_t nb02,
+ uint64_t nb03,
+ int64_t /*ne10*/,
+ int64_t /*ne11*/,
+ int64_t /*ne12*/,
+ int64_t /*ne13*/,
+ uint64_t nb10,
+ uint64_t nb11,
+ uint64_t nb12,
+ uint64_t nb13,
+ int64_t ne0,
+ int64_t /*ne1*/,
+ int64_t /*ne2*/,
+ int64_t /*ne3*/,
+ uint64_t nb0,
+ uint64_t nb1,
+ uint64_t nb2,
+ uint64_t nb3,
+ int32_t dim) {
+ const int64_t i3 = blockIdx.z;
+ const int64_t i2 = blockIdx.y;
+ const int64_t i1 = blockIdx.x;
+
+ int64_t o[4] = {0, 0, 0, 0};
+ o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03));
+
+ const float * x;
+
+ for (int i0 = threadIdx.x; i0 < ne0; i0 += blockDim.x) {
+ if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
+ x = (const float *)(src0 + (i3 )*nb03 + (i2 )*nb02 + (i1 )*nb01 + (i0 )*nb00);
+ } else {
+ x = (const float *)(src1 + (i3 - o[3])*nb13 + (i2 - o[2])*nb12 + (i1 - o[1])*nb11 + (i0 - o[0])*nb10);
+ }
+
+ float * y = (float *)(dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
+
+ *y = *x;
+ }
+}
+
+
void ggml_cuda_op_concat(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
const ggml_tensor * src0 = dst->src[0];
const ggml_tensor * src1 = dst->src[1];
- const float * src0_d = (const float *)src0->data;
- const float * src1_d = (const float *)src1->data;
-
- float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();
const int32_t dim = ((int32_t *) dst->op_params)[0];
- GGML_ASSERT(ggml_is_contiguous(src0));
- GGML_ASSERT(ggml_is_contiguous(src1));
-
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
- GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
- if (dim != 3) {
- for (int i3 = 0; i3 < dst->ne[3]; i3++) {
- concat_f32_cuda(
- src0_d + i3 * (src0->nb[3] / 4),
- src1_d + i3 * (src1->nb[3] / 4),
- dst_d + i3 * ( dst->nb[3] / 4),
- src0->ne[0], src0->ne[1], src0->ne[2],
- dst->ne[0], dst->ne[1], dst->ne[2], dim, stream);
+ if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
+ const float * src0_d = (const float *)src0->data;
+ const float * src1_d = (const float *)src1->data;
+
+ float * dst_d = (float *)dst->data;
+
+ if (dim != 3) {
+ for (int i3 = 0; i3 < dst->ne[3]; i3++) {
+ concat_f32_cuda(
+ src0_d + i3 * (src0->nb[3] / 4),
+ src1_d + i3 * (src1->nb[3] / 4),
+ dst_d + i3 * ( dst->nb[3] / 4),
+ src0->ne[0], src0->ne[1], src0->ne[2],
+ dst->ne[0], dst->ne[1], dst->ne[2], dim, stream);
+ }
+ } else {
+ const size_t size0 = ggml_nbytes(src0);
+ const size_t size1 = ggml_nbytes(src1);
+
+ CUDA_CHECK(cudaMemcpyAsync(dst_d, src0_d, size0, cudaMemcpyDeviceToDevice, stream));
+ CUDA_CHECK(cudaMemcpyAsync(dst_d + size0/4, src1_d, size1, cudaMemcpyDeviceToDevice, stream));
}
} else {
- const size_t size0 = ggml_nbytes(src0);
- const size_t size1 = ggml_nbytes(src1);
-
- CUDA_CHECK(cudaMemcpyAsync(dst_d, src0_d, size0, cudaMemcpyDeviceToDevice, stream));
- CUDA_CHECK(cudaMemcpyAsync(dst_d + size0/4, src1_d, size1, cudaMemcpyDeviceToDevice, stream));
+ dim3 grid_dim(dst->ne[1], dst->ne[2], dst->ne[3]);
+ concat_f32_non_cont<<<grid_dim, CUDA_CONCAT_BLOCK_SIZE, 0, stream>>>(
+ (const char *)src0->data,
+ (const char *)src1->data,
+ ( char *)dst->data,
+ src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
+ src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
+ src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
+ src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3],
+ dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3],
+ dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], dim);
}
}
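
The non-contiguous fallback above assigns one thread block per (i1, i2, i3) output coordinate and walks i0; each output element is read from src0 unless its index exceeds src0's extent along `dim`, in which case the concat offset is subtracted and the element comes from src1. A host-side reference of the same index mapping (a sketch for sanity-checking, not part of the patch):

```cpp
// Hedged CPU reference for the non-contiguous f32 concat index mapping.
// ne0/nb0 describe src0, nb1 describes src1, ne/nb describe dst (strides in bytes).
#include <cstdint>

static void concat_f32_non_cont_ref(
        const char * src0, const char * src1, char * dst,
        const int64_t ne0[4], const uint64_t nb0[4], const uint64_t nb1[4],
        const int64_t ne[4],  const uint64_t nb[4],  int dim) {
    int64_t o[4] = {0, 0, 0, 0};
    o[dim] = ne0[dim]; // indices past src0's extent along dim are read from src1

    for (int64_t i3 = 0; i3 < ne[3]; ++i3)
    for (int64_t i2 = 0; i2 < ne[2]; ++i2)
    for (int64_t i1 = 0; i1 < ne[1]; ++i1)
    for (int64_t i0 = 0; i0 < ne[0]; ++i0) {
        const float * x = (i0 < ne0[0] && i1 < ne0[1] && i2 < ne0[2] && i3 < ne0[3])
            ? (const float *)(src0 +  i3        *nb0[3] +  i2        *nb0[2] +  i1        *nb0[1] +  i0        *nb0[0])
            : (const float *)(src1 + (i3 - o[3])*nb1[3] + (i2 - o[2])*nb1[2] + (i1 - o[1])*nb1[1] + (i0 - o[0])*nb1[0]);
        *(float *)(dst + i3*nb[3] + i2*nb[2] + i1*nb[1] + i0*nb[0]) = *x;
    }
}
```
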
diff --git a/ggml-cuda/fattn-common.cuh b/ggml-cuda/fattn-common.cuh
index 1dd519bde..c00f8606a 100644
--- a/ggml-cuda/fattn-common.cuh
+++ b/ggml-cuda/fattn-common.cuh
@@ -1,4 +1,8 @@
+#pragma once
+
#include "common.cuh"
+#include "convert.cuh"
+#include "vecdotq.cuh"
#include <cstdint>
@@ -34,11 +38,523 @@ typedef void (* fattn_kernel_t)(
const int nb11,
const int nb12,
const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
const int ne0,
const int ne1,
const int ne2,
const int ne3);
+typedef half (*vec_dot_KQ_f16_t)(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds);
+typedef float (*vec_dot_KQ_f32_t)(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds);
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_0(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+
+ const block_q4_0 * K_q4_0 = (const block_q4_0 *) K_c;
+ GGML_UNUSED(Q_v);
+
+ half sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const int ib = k_KQ / QI8_1;
+ const int iqs4 = k_KQ % QI4_0;
+ const int shift = k_KQ & (QI8_1/2);
+
+ const int v = (get_int_from_uint8(K_q4_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+ const int u = Q_q8[k_KQ_0/WARP_SIZE];
+
+ const int sumi = __dp4a(v, u, 0);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_ds = (const half2 *) Q_ds_v;
+
+ const half2 sum2 = __half2half2(K_q4_0[ib].d) * Q_ds[k_KQ_0/WARP_SIZE];
+ sum += (T) (((half) sumi)*__low2half(sum2) - __high2half(sum2) /* *8/QI8_1 == 1 */);
+ } else
+#endif // FP16_AVAILABLE
+ {
+ const float2 * Q_ds = (const float2 *) Q_ds_v;
+
+ sum += (T) (__half2float(K_q4_0[ib].d) * (sumi*Q_ds[k_KQ_0/WARP_SIZE].x - (8/QI8_1)*Q_ds[k_KQ_0/WARP_SIZE].y));
+ }
+ }
+
+ return sum;
+#else
+ GGML_UNUSED(K_c);
+ GGML_UNUSED(Q_v);
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+ NO_DEVICE_CODE;
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+}
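
These KQ dot products lean on `__dp4a`, which treats each 32-bit word as four signed 8-bit lanes, multiplies lane-wise and accumulates into an int. A scalar stand-in for that instruction (a sketch, not from the patch) makes the integer core of the routine above easy to follow; with the q4_0 nibbles masked into the low half of each lane and the Q side pre-quantized to int8, the rest is just rescaling by the two block scales and subtracting the -8 offset correction, which is what the FP16/FP32 branches do.

```cpp
// Hedged scalar equivalent of __dp4a(v, u, 0): four signed 8-bit lanes,
// multiplied lane-wise, products summed into a 32-bit accumulator.
#include <cstdint>

static int dp4a_ref(int v, int u) {
    int sum = 0;
    for (int b = 0; b < 4; ++b) {
        const int8_t vb = (int8_t)(v >> (8*b));
        const int8_t ub = (int8_t)(u >> (8*b));
        sum += (int)vb * (int)ub;
    }
    return sum;
}
```
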
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q4_1(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+
+ const block_q4_1 * K_q4_1 = (const block_q4_1 *) K_c;
+ GGML_UNUSED(Q_v);
+
+ T sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const int ib = k_KQ / QI8_1;
+ const int iqs4 = k_KQ % QI4_1;
+ const int shift = k_KQ & (QI8_1/2);
+
+ const int v = (get_int_from_uint8_aligned(K_q4_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+ const int u = Q_q8[k_KQ_0/WARP_SIZE];
+
+ const int sumi = __dp4a(v, u, 0);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_ds = (const half2 *) Q_ds_v;
+
+ const half2 d4d8_m4s8 = K_q4_1[ib].dm * Q_ds[k_KQ_0/WARP_SIZE];
+ const half2 sumid4d8_m4s8scaled = d4d8_m4s8 * make_half2(sumi, 1.0f/QI8_1);
+ sum += (T) (__low2half(sumid4d8_m4s8scaled) + __high2half(sumid4d8_m4s8scaled));
+ } else
+#endif // FP16_AVAILABLE
+ {
+ const float2 * Q_ds = (const float2 *) Q_ds_v;
+
+ const float sumid4d8 = __low2float(K_q4_1[ib].dm)*Q_ds[k_KQ_0/WARP_SIZE].x * sumi;
+ const float m4s8scaled = __high2float(K_q4_1[ib].dm)*Q_ds[k_KQ_0/WARP_SIZE].y / QI8_1;
+
+ sum += (T) (sumid4d8 + m4s8scaled);
+ }
+ }
+
+ return sum;
+#else
+ GGML_UNUSED(K_c);
+ GGML_UNUSED(Q_v);
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+ NO_DEVICE_CODE;
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+}
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_0(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+
+ const block_q5_0 * K_q5_0 = (const block_q5_0 *) K_c;
+ GGML_UNUSED(Q_v);
+
+ T sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const int ib = k_KQ / QI8_1;
+ const int iqs4 = k_KQ % QI5_0;
+ const int iqs8 = k_KQ % QI8_1;
+ const int shift = k_KQ & (QI8_1/2);
+
+ int v = (get_int_from_uint8(K_q5_0[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+ const int vh = get_int_from_uint8(K_q5_0[ib].qh, 0) >> (iqs8 * QI5_0);
+ v |= (vh << 4) & 0x00000010; // 0 -> 4
+ v |= (vh << 11) & 0x00001000; // 1 -> 12
+ v |= (vh << 18) & 0x00100000; // 2 -> 20
+ v |= (vh << 25) & 0x10000000; // 3 -> 28
+
+ const int u = Q_q8[k_KQ_0/WARP_SIZE];
+
+ const int sumi = __dp4a(v, u, 0);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_ds = (const half2 *) Q_ds_v;
+
+ const half2 sum2 = __half2half2(K_q5_0[ib].d) * Q_ds[k_KQ_0/WARP_SIZE];
+ sum += (T) (((half) sumi)*__low2half(sum2) - __high2half(sum2)*__float2half(2.0f)) /* *16/QI8_1 == 2 */;
+ } else
+#endif // FP16_AVAILABLE
+ {
+ const float2 * Q_ds = (const float2 *) Q_ds_v;
+
+ sum += (T) (__half2float(K_q5_0[ib].d) * (sumi*Q_ds[k_KQ_0/WARP_SIZE].x - (16/QI8_1)*Q_ds[k_KQ_0/WARP_SIZE].y));
+ }
+ }
+
+ return sum;
+#else
+ GGML_UNUSED(K_c);
+ GGML_UNUSED(Q_v);
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+ NO_DEVICE_CODE;
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+}
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q5_1(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+
+ const block_q5_1 * K_q5_1 = (const block_q5_1 *) K_c;
+ GGML_UNUSED(Q_v);
+
+ T sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const int ib = k_KQ / QI8_1;
+ const int iqs4 = k_KQ % QI5_1;
+ const int iqs8 = k_KQ % QI8_1;
+ const int shift = k_KQ & (QI8_1/2);
+
+ int v = (get_int_from_uint8(K_q5_1[ib].qs, iqs4) >> shift) & 0x0F0F0F0F;
+ const int vh = get_int_from_uint8(K_q5_1[ib].qh, 0) >> (iqs8 * QI5_1);
+ v |= (vh << 4) & 0x00000010; // 0 -> 4
+ v |= (vh << 11) & 0x00001000; // 1 -> 12
+ v |= (vh << 18) & 0x00100000; // 2 -> 20
+ v |= (vh << 25) & 0x10000000; // 3 -> 28
+
+ const int u = Q_q8[k_KQ_0/WARP_SIZE];
+
+ const int sumi = __dp4a(v, u, 0);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_ds = (const half2 *) Q_ds_v;
+
+ const half2 d5d8_m5s8 = K_q5_1[ib].dm * Q_ds[k_KQ_0/WARP_SIZE];
+ const half2 sumid5d8_m5s8scaled = d5d8_m5s8 * make_half2(sumi, 1.0f/QI8_1);
+ sum += (T) (__low2half(sumid5d8_m5s8scaled) + __high2half(sumid5d8_m5s8scaled));
+ } else
+#endif // FP16_AVAILABLE
+ {
+ const float2 * Q_ds = (const float2 *) Q_ds_v;
+
+ const float sumid5d8 = __low2float(K_q5_1[ib].dm)*Q_ds[k_KQ_0/WARP_SIZE].x * sumi;
+ const float m5s8scaled = __high2float(K_q5_1[ib].dm)*Q_ds[k_KQ_0/WARP_SIZE].y / QI8_1;
+
+ sum += (T) (sumid5d8 + m5s8scaled);
+ }
+ }
+
+ return sum;
+#else
+ GGML_UNUSED(K_c);
+ GGML_UNUSED(Q_v);
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+ NO_DEVICE_CODE;
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+}
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_q8_0(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8, const void * __restrict__ Q_ds_v) {
+#if __CUDA_ARCH__ >= MIN_CC_DP4A
+
+ const block_q8_0 * K_q8_0 = (const block_q8_0 *) K_c;
+ GGML_UNUSED(Q_v);
+
+ T sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/sizeof(int); k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const int ib = k_KQ / QI8_0;
+ const int iqs = k_KQ % QI8_0;
+
+ const int v = get_int_from_int8(K_q8_0[ib].qs, iqs);
+
+ T Q_d;
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_ds = (const half2 *) Q_ds_v;
+ Q_d = __low2half(Q_ds[k_KQ_0/WARP_SIZE]);
+ } else {
+ const float2 * Q_ds = (const float2 *) Q_ds_v;
+ Q_d = Q_ds[k_KQ_0/WARP_SIZE].x;
+ }
+
+ sum += vec_dot_q8_0_q8_1_impl(&v, &Q_q8[k_KQ_0/WARP_SIZE], K_q8_0[ib].d, Q_d);
+ }
+
+ return sum;
+#else
+ GGML_UNUSED(K_c);
+ GGML_UNUSED(Q_v);
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+ NO_DEVICE_CODE;
+#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
+}
+
+template<typename T, int D>
+static __device__ __forceinline__ T vec_dot_fattn_vec_KQ_f16(
+ const char * __restrict__ K_c, const void * __restrict__ Q_v, const int * __restrict__ Q_q8 , const void * __restrict__ Q_ds_v) {
+
+ const half2 * K_h2 = (const half2 *) K_c;
+ GGML_UNUSED(Q_q8);
+ GGML_UNUSED(Q_ds_v);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ const half2 * Q_h2 = (const half2 *) Q_v;
+
+ half2 sum2 = make_half2(0.0f, 0.0f);
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const half2 K_ik = K_h2[k_KQ];
+ sum2 += K_ik * Q_h2[k_KQ_0/WARP_SIZE];
+ }
+
+ return __low2half(sum2) + __high2half(sum2);
+ }
+#endif // FP16_AVAILABLE
+
+ const float2 * Q_f2 = (const float2 *) Q_v;
+
+ float sum = 0.0f;
+
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
+ const int k_KQ = k_KQ_0 + threadIdx.x;
+
+ const half2 K_ik = K_h2[k_KQ];
+ sum += __low2float(K_ik) * Q_f2[k_KQ_0/WARP_SIZE].x;
+ sum += __high2float(K_ik) * Q_f2[k_KQ_0/WARP_SIZE].y;
+ }
+
+ return sum;
+}
+
+template<typename Tds>
+static __device__ __forceinline__ void quantize_q8_1_to_shared(
+ const float * __restrict__ x, const float scale, int * __restrict__ yq32, void * __restrict__ yds) {
+
+ float vals[sizeof(int)] = {0.0f};
+#pragma unroll
+ for (int l = 0; l < sizeof(int); ++l) {
+ vals[l] = scale * x[4*threadIdx.x + l];
+ }
+
+ float amax = fabsf(vals[0]);
+ float sum = vals[0];
+#pragma unroll
+ for (int l = 1; l < sizeof(int); ++l) {
+ amax = fmaxf(amax, fabsf(vals[l]));
+ sum += vals[l];
+ }
+#pragma unroll
+ for (int mask = QI8_1/2; mask > 0; mask >>= 1) {
+ amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, mask, 32));
+ sum += __shfl_xor_sync(0xFFFFFFFF, sum, mask, 32);
+ }
+
+ const float d = amax / 127;
+ int q32 = 0;
+ int8_t * q8 = (int8_t *) &q32;
+
+ if (d != 0.0f) {
+#pragma unroll
+ for (int l = 0; l < sizeof(int); ++l) {
+ q8[l] = roundf(vals[l] / d);
+ }
+ }
+
+ yq32[threadIdx.x] = q32;
+ if (threadIdx.x % QI8_1 == 0) {
+ if (std::is_same<Tds, half2>::value) {
+ ((half2 *) yds)[threadIdx.x/QI8_1] = make_half2(d, sum);
+ } else {
+ ((float2 *) yds)[threadIdx.x/QI8_1] = make_float2(d, sum);
+ }
+ }
+}
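
quantize_q8_1_to_shared follows the usual q8_1 recipe: take the absolute maximum of the (already scale-multiplied) values, set d = amax/127, round each value to an int8 multiple of d, and stash (d, sum) so the dot-product routines can undo offsets later. A host-side sketch of the per-thread part, with the warp-wide amax/sum reduction left out:

```cpp
// Hedged host-side sketch of the per-thread q8_1 quantization step
// (the kernel additionally reduces amax and sum across the warp).
#include <cmath>
#include <cstdint>

static void quantize_q8_1_ref(const float x[4], float scale,
                              int8_t q[4], float & d, float & sum) {
    float vals[4];
    float amax = 0.0f;
    sum = 0.0f;
    for (int l = 0; l < 4; ++l) {
        vals[l] = scale * x[l];
        amax = fmaxf(amax, fabsf(vals[l]));
        sum += vals[l];
    }
    d = amax / 127;
    for (int l = 0; l < 4; ++l) {
        q[l] = d != 0.0f ? (int8_t) roundf(vals[l] / d) : 0;
    }
}
```
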
+
+typedef half (*dequantize_1_f16_t)(const void *, const int64_t);
+typedef float (*dequantize_1_f32_t)(const void *, const int64_t);
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_q4_0(const void * __restrict__ vx, const int64_t i) {
+ const block_q4_0 * x = (const block_q4_0 *) vx;
+
+ const int64_t ib = i / QK4_0;
+ const int iqs = i % (QK4_0/2);
+ const int shift = (i % QK4_0) / (QK4_0/2);
+
+ const T d = x[ib].d;
+ const int q0 = x[ib].qs[iqs];
+ const int q = ((q0 >> (4*shift)) & 0x0F) - 8;
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ return ((half) d)*((half) q);
+ }
+#endif // FP16_AVAILABLE
+
+ return ((float) d)*((float) q);
+}
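
For reference, q4_0 stores one fp16 scale d per block of 32 values and packs two 4-bit quants per byte; a nibble q decodes to d*(q - 8), so for example d = 0.5 and q = 3 give 0.5*(3 - 8) = -2.5. The q4_1/q5_x/q8_0 variants below differ only in the offset handling: an explicit min term for the *_1 formats, a fifth bit pulled from qh for q5_*, and a plain int8 value times d for q8_0.
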
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_q4_1(const void * __restrict__ vx, const int64_t i) {
+ const block_q4_1 * x = (const block_q4_1 *) vx;
+
+ const int64_t ib = i / QK4_1;
+ const int iqs = i % (QK4_1/2);
+ const int shift = (i % QK4_1) / (QK4_1/2);
+
+ const half2 dm = x[ib].dm;
+ const int q0 = x[ib].qs[iqs];
+ const int q = ((q0 >> (4*shift)) & 0x0F);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ return __low2half(dm)*((half) q) + __high2half(dm);
+ }
+#endif // FP16_AVAILABLE
+
+ return __low2float(dm)*((float) q) + __high2float(dm);
+}
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_q5_0(const void * __restrict__ vx, const int64_t i) {
+ const block_q5_0 * x = (const block_q5_0 *) vx;
+
+ const int64_t ib = i / QK5_0;
+ const int idq = i % QK5_0;
+ const int iqs = i % (QK5_0/2);
+ const int shift = (i % QK5_0) / (QK5_0/2);
+
+ const T d = x[ib].d;
+ const int ql0 = x[ib].qs[iqs];
+ const int qh0 = get_int_from_uint8(x[ib].qh, 0);
+ const int ql = ((ql0 >> (4*shift)) & 0x0F);
+ const int qh = ((qh0 >> idq) << 4) & 0x10;
+ const int q = (ql | qh) - 16;
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ return ((half) d)*((half) q);
+ }
+#endif // FP16_AVAILABLE
+
+ return ((float) d)*((float) q);
+}
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_q5_1(const void * __restrict__ vx, const int64_t i) {
+ const block_q5_1 * x = (const block_q5_1 *) vx;
+
+ const int64_t ib = i / QK5_1;
+ const int idq = i % QK5_1;
+ const int iqs = i % (QK5_1/2);
+ const int shift = (i % QK5_1) / (QK5_1/2);
+
+ const half2 dm = x[ib].dm;
+ const int ql0 = x[ib].qs[iqs];
+ const int qh0 = get_int_from_uint8_aligned(x[ib].qh, 0);
+ const int ql = ((ql0 >> (4*shift)) & 0x0F);
+ const int qh = ((qh0 >> idq) << 4) & 0x10;
+ const int q = (ql | qh);
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ return __low2half(dm)*((half) q) + __high2half(dm);
+ }
+#endif // FP16_AVAILABLE
+
+ return __low2float(dm)*((float) q) + __high2float(dm);
+}
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_q8_0(const void * __restrict__ vx, const int64_t i) {
+ const block_q8_0 * x = (const block_q8_0 *) vx;
+
+ const int64_t ib = i / QK8_0;
+ const int iqs = i % QK8_0;
+
+ const T d = x[ib].d;
+ const int q = x[ib].qs[iqs];
+
+#if FP16_AVAILABLE
+ if (std::is_same<T, half>::value) {
+ return ((half) d)*((half) q);
+ }
+#endif // FP16_AVAILABLE
+
+ return ((float) d)*((float) q);
+}
+
+template<typename T>
+static __device__ __forceinline__ T dequantize_1_f16(const void * __restrict__ vx, const int64_t i) {
+ const half * x = (const half *) vx;
+
+ return x[i];
+}
+
+template<int D>
+constexpr __device__ vec_dot_KQ_f16_t get_vec_dot_KQ_f16(ggml_type type_K) {
+ return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0<half, D> :
+ type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1<half, D> :
+ type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0<half, D> :
+ type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1<half, D> :
+ type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0<half, D> :
+ type_K == GGML_TYPE_F16 ? vec_dot_fattn_vec_KQ_f16<half, D> :
+ nullptr;
+}
+
+template<int D>
+constexpr __device__ vec_dot_KQ_f32_t get_vec_dot_KQ_f32(ggml_type type_K) {
+ return type_K == GGML_TYPE_Q4_0 ? vec_dot_fattn_vec_KQ_q4_0<float, D> :
+ type_K == GGML_TYPE_Q4_1 ? vec_dot_fattn_vec_KQ_q4_1<float, D> :
+ type_K == GGML_TYPE_Q5_0 ? vec_dot_fattn_vec_KQ_q5_0<float, D> :
+ type_K == GGML_TYPE_Q5_1 ? vec_dot_fattn_vec_KQ_q5_1<float, D> :
+ type_K == GGML_TYPE_Q8_0 ? vec_dot_fattn_vec_KQ_q8_0<float, D> :
+ type_K == GGML_TYPE_F16 ? vec_dot_fattn_vec_KQ_f16<float, D> :
+ nullptr;
+}
+
+constexpr __device__ dequantize_1_f16_t get_dequantize_1_f16(ggml_type type_V) {
+ return type_V == GGML_TYPE_Q4_0 ? dequantize_1_q4_0<half> :
+ type_V == GGML_TYPE_Q4_1 ? dequantize_1_q4_1<half> :
+ type_V == GGML_TYPE_Q5_0 ? dequantize_1_q5_0<half> :
+ type_V == GGML_TYPE_Q5_1 ? dequantize_1_q5_1<half> :
+ type_V == GGML_TYPE_Q8_0 ? dequantize_1_q8_0<half> :
+ type_V == GGML_TYPE_F16 ? dequantize_1_f16<half> :
+ nullptr;
+}
+
+constexpr __device__ dequantize_1_f32_t get_dequantize_1_f32(ggml_type type_V) {
+ return type_V == GGML_TYPE_Q4_0 ? dequantize_1_q4_0<float> :
+ type_V == GGML_TYPE_Q4_1 ? dequantize_1_q4_1<float> :
+ type_V == GGML_TYPE_Q5_0 ? dequantize_1_q5_0<float> :
+ type_V == GGML_TYPE_Q5_1 ? dequantize_1_q5_1<float> :
+ type_V == GGML_TYPE_Q8_0 ? dequantize_1_q8_0<float> :
+ type_V == GGML_TYPE_F16 ? dequantize_1_f16<float> :
+ nullptr;
+}
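
The four constexpr helpers above act as compile-time dispatch tables: once the head size D and the K/V type are fixed at kernel instantiation time, the ternary chain collapses to a single function pointer, so there is no per-element branching on the cache type. A tiny host-side illustration of the same pattern (the names here are made up; this is not the patch's code):

```cpp
// Hedged illustration of the constexpr function-pointer dispatch pattern.
#include <cstdio>

typedef float (*unary_fn_t)(float);

static float identity_fn(float x) { return x; }
static float double_fn  (float x) { return 2.0f*x; }

constexpr unary_fn_t pick_fn(int mode) {
    return mode == 0 ? identity_fn :
           mode == 1 ? double_fn   :
           nullptr;
}

int main() {
    // Resolved at compile time, analogous to get_vec_dot_KQ_f16<D>(type_K).
    constexpr unary_fn_t f = pick_fn(1);
    std::printf("%.1f\n", f(3.0f));
    return 0;
}
```
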
+
template<int D, int parallel_blocks> // D == head size
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
__launch_bounds__(D, 1)
@@ -83,8 +599,32 @@ static __global__ void flash_attn_combine_results(
dst[blockIdx.y*D + tid] = VKQ_numerator / VKQ_denominator;
}
+static void on_no_fattn_vec_case(const int D) {
+ if (D == 64) {
+ fprintf(stderr, "Unsupported KV type combination for head_size 64.\n");
+ fprintf(stderr, "By default only f16 KV cache is supported.\n");
+ fprintf(stderr, "Compile with LLAMA_CUDA_FA_ALL_QUANTS for V cache quantization support.\n");
+ GGML_ASSERT(false);
+ } else if (D == 128) {
+ fprintf(stderr, "Unsupported KV type combination for head_size 128.\n");
+ fprintf(stderr, "Supported combinations:\n");
+ fprintf(stderr, " - K == q4_0, V == q4_0, 4.50 BPV\n");
+ fprintf(stderr, " - K == q8_0, V == q8_0, 8.50 BPV\n");
+ fprintf(stderr, " - K == f16, V == f16, 16.00 BPV\n");
+ fprintf(stderr, "Compile with LLAMA_CUDA_FA_ALL_QUANTS for all combinations of q4_0, q4_1, q5_0, q5_1, q8_0, and f16.\n");
+ GGML_ASSERT(false);
+ } else {
+ fprintf(stderr, "Unsupported KV type combination for head_size 256.\n");
+ fprintf(stderr, "Only f16 is supported.\n");
+ GGML_ASSERT(false);
+ }
+}
+
template <int D, int parallel_blocks>
-void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel, int nwarps, int cols_per_block) {
+void launch_fattn(
+ ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kernel_t fattn_kernel,
+ const int nwarps, const int cols_per_block, const bool need_f16_K, const bool need_f16_V
+) {
const ggml_tensor * Q = dst->src[0];
const ggml_tensor * K = dst->src[1];
const ggml_tensor * V = dst->src[2];
@@ -94,8 +634,6 @@ void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kern
ggml_tensor * KQV = dst;
GGML_ASSERT(Q->type == GGML_TYPE_F32);
- GGML_ASSERT(K->type == GGML_TYPE_F16);
- GGML_ASSERT(V->type == GGML_TYPE_F16);
GGML_ASSERT(KQV->type == GGML_TYPE_F32);
GGML_ASSERT(!mask || mask->type == GGML_TYPE_F16);
@@ -107,9 +645,49 @@ void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kern
ggml_cuda_pool & pool = ctx.pool();
cudaStream_t main_stream = ctx.stream();
+ ggml_cuda_pool_alloc<half> K_f16(pool);
+ ggml_cuda_pool_alloc<half> V_f16(pool);
ggml_cuda_pool_alloc<float> dst_tmp(pool);
ggml_cuda_pool_alloc<float2> dst_tmp_meta(pool);
+ char * K_data = (char *) K->data;
+ size_t nb11 = K->nb[1];
+ size_t nb12 = K->nb[2];
+ size_t nb13 = K->nb[3];
+
+ char * V_data = (char *) V->data;
+ size_t nb21 = V->nb[1];
+ size_t nb22 = V->nb[2];
+ size_t nb23 = V->nb[3];
+
+ if (need_f16_K && K->type != GGML_TYPE_F16) {
+ K_f16.alloc(ggml_nelements(K));
+ to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(K->type);
+ to_fp16(K_data, K_f16.ptr, ggml_nelements(K), main_stream);
+ K_data = (char *) K_f16.ptr;
+
+ const size_t bs = ggml_blck_size(K->type);
+ const size_t ts = ggml_type_size(K->type);
+
+ nb11 = nb11*bs*sizeof(half)/ts;
+ nb12 = nb12*bs*sizeof(half)/ts;
+ nb13 = nb13*bs*sizeof(half)/ts;
+ }
+
+ if (need_f16_V && V->type != GGML_TYPE_F16) {
+ V_f16.alloc(ggml_nelements(V));
+ to_fp16_cuda_t to_fp16 = ggml_get_to_fp16_cuda(V->type);
+ to_fp16(V_data, V_f16.ptr, ggml_nelements(V), main_stream);
+ V_data = (char *) V_f16.ptr;
+
+ const size_t bs = ggml_blck_size(V->type);
+ const size_t ts = ggml_type_size(V->type);
+
+ nb21 = nb21*bs*sizeof(half)/ts;
+ nb22 = nb22*bs*sizeof(half)/ts;
+ nb23 = nb23*bs*sizeof(half)/ts;
+ }
+
if (parallel_blocks > 1) {
dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV));
dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV));
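
When K or V arrive quantized but the kernel wants f16, launch_fattn now dequantizes them into pool buffers and rescales the byte strides by bs*sizeof(half)/ts, i.e. elements per block times two bytes, divided by the quantized block size in bytes. As a worked check for q8_0, where a block holds 32 quants in 34 bytes (32 int8 values plus an fp16 scale), a row of ne10 elements occupies ne10/32*34 bytes before conversion and exactly 2*ne10 bytes after:

```cpp
// Hedged sanity check of the stride rescaling for a q8_0 K cache.
#include <cassert>
#include <cstddef>

int main() {
    const size_t ne10 = 4096;                   // example row length in elements
    const size_t bs   = 32;                     // ggml_blck_size(GGML_TYPE_Q8_0)
    const size_t ts   = 34;                     // ggml_type_size(GGML_TYPE_Q8_0) in bytes
    const size_t nb11_q8_0 = ne10/bs*ts;        // bytes per quantized row
    const size_t nb11_f16  = nb11_q8_0*bs*2/ts; // sizeof(half) == 2
    assert(nb11_f16 == 2*ne10);                 // one half per element after to_fp16
    return 0;
}
```
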
@@ -133,8 +711,8 @@ void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kern
fattn_kernel<<<blocks_num, block_dim, shmem, main_stream>>>(
(const char *) Q->data,
- (const char *) K->data,
- (const char *) V->data,
+ K_data,
+ V_data,
mask ? ((const char *) mask->data) : nullptr,
(parallel_blocks) == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
scale, max_bias, m0, m1, n_head_log2,
@@ -142,7 +720,8 @@ void launch_fattn(ggml_backend_cuda_context & ctx, ggml_tensor * dst, fattn_kern
K->ne[0], K->ne[1], K->ne[2], K->ne[3],
mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
Q->nb[1], Q->nb[2], Q->nb[3],
- K->nb[1], K->nb[2], K->nb[3],
+ nb11, nb12, nb13,
+ nb21, nb22, nb23,
KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3]
);
CUDA_CHECK(cudaGetLastError());
diff --git a/ggml-cuda/fattn-tile-f16.cu b/ggml-cuda/fattn-tile-f16.cu
index cdb5eaff7..cb11d7212 100644
--- a/ggml-cuda/fattn-tile-f16.cu
+++ b/ggml-cuda/fattn-tile-f16.cu
@@ -36,6 +36,9 @@ static __global__ void flash_attn_tile_ext_f16(
const int nb11,
const int nb12,
const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
const int ne0,
const int ne1,
const int ne2,
@@ -275,13 +278,13 @@ void launch_fattn_tile_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor *
constexpr int D = 64;
constexpr int nwarps = 8;
fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16<D, cols_per_block, nwarps, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
} break;
case 128: {
constexpr int D = 128;
constexpr int nwarps = 8;
fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f16;
- launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block);
+ launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
} break;
default: {
GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
diff --git a/ggml-cuda/fattn-tile-f32.cu b/ggml-cuda/fattn-tile-f32.cu
index 5a3de2918..15e22f495 100644
--- a/ggml-cuda/fattn-tile-f32.cu
+++ b/ggml-cuda/fattn-tile-f32.cu
@@ -36,6 +36,9 @@ static __global__ void flash_attn_tile_ext_f32(
const int nb11,
const int nb12,
const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
const int ne0,
const int ne1,
const int ne2,
@@ -272,13 +275,13 @@ void launch_fattn_tile_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor *
constexpr int D = 64;
constexpr int nwarps = 8;
fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32<D, cols_per_block, nwarps, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
} break;
case 128: {
constexpr int D = 128;
constexpr int nwarps = 8;
fattn_kernel_t fattn_kernel = flash_attn_tile_ext_f32;
- launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block);
+ launch_fattn(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
} break;
default: {
GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
diff --git a/ggml-cuda/fattn-vec-f16.cu b/ggml-cuda/fattn-vec-f16.cu
deleted file mode 100644
index 808e8f362..000000000
--- a/ggml-cuda/fattn-vec-f16.cu
+++ /dev/null
@@ -1,330 +0,0 @@
-#include "common.cuh"
-#include "fattn-common.cuh"
-#include "fattn-vec-f16.cuh"
-
-template<int D, int ncols, int parallel_blocks> // D == head size
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-__launch_bounds__(D, 1)
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-static __global__ void flash_attn_vec_ext_f16(
- const char * __restrict__ Q,
- const char * __restrict__ K,
- const char * __restrict__ V,
- const char * __restrict__ mask,
- float * __restrict__ dst,
- float2 * __restrict__ dst_meta,
- const float scale,
- const float max_bias,
- const float m0,
- const float m1,
- const uint32_t n_head_log2,
- const int ne00,
- const int ne01,
- const int ne02,
- const int ne03,
- const int ne10,
- const int ne11,
- const int ne12,
- const int ne13,
- const int ne31,
- const int nb31,
- const int nb01,
- const int nb02,
- const int nb03,
- const int nb11,
- const int nb12,
- const int nb13,
- const int ne0,
- const int ne1,
- const int ne2,
- const int ne3) {
-#if FP16_AVAILABLE
- //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
-
- const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
- const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
-
- const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
- const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0);
- const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio));
- const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
- const half * maskh = (const half *) mask + ne11*ic0;
-
- const int stride_KV = nb11 / sizeof(half);
- const int stride_KV2 = nb11 / sizeof(half2);
-
- const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
- const half slopeh = __float2half(slopef);
-
- static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
- constexpr int nwarps = D / WARP_SIZE;
- const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
- __builtin_assume(tid < D);
-
- __shared__ half KQ[ncols*D];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- KQ[j*D + tid] = -HALF_MAX_HALF;
- }
- half2 * KQ2 = (half2 *) KQ;
-
- half kqmax[ncols];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqmax[j] = -HALF_MAX_HALF;
- }
- half kqsum[ncols] = {0.0f};
-
- __shared__ half kqmax_shared[ncols][WARP_SIZE];
- __shared__ half kqsum_shared[ncols][WARP_SIZE];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- if (threadIdx.y == 0) {
- kqmax_shared[j][threadIdx.x] = -HALF_MAX_HALF;
- kqsum_shared[j][threadIdx.x] = 0.0f;
- }
- }
- __syncthreads();
-
- // Convert Q to half2 and store in registers:
- half2 Q_h2[ncols][D/(2*WARP_SIZE)];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
-#pragma unroll
- for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
-
- const float2 tmp = ncols <= 2 || ic0 + j < ne01 ? Q_f2[j*(nb01/sizeof(float2)) + i] : make_float2(0.0f, 0.0f);
- Q_h2[j][i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y);
- }
- }
-
- half2 VKQ[ncols] = {{0.0f, 0.0f}};
-
- const int k_start = parallel_blocks == 1 ? 0 : ip*D;
- for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
- // Calculate KQ tile and keep track of new maximum KQ values:
-
- // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression,
- // see https://github.com/ggerganov/llama.cpp/pull/7061 .
- // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable).
- half kqmax_new = kqmax[0];
- half kqmax_new_arr[ncols];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqmax_new_arr[j] = kqmax[j];
- }
-
-#pragma unroll
- for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
- const int i_KQ = i_KQ_0 + threadIdx.y;
-
- if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
- break;
- }
-
- half2 sum2[ncols] = {{0.0f, 0.0f}};
-#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
- const int k_KQ = k_KQ_0 + threadIdx.x;
-
- const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- sum2[j] += K_ik * Q_h2[j][k_KQ_0/WARP_SIZE];
- }
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- sum2[j] = warp_reduce_sum(sum2[j]);
- half sum = __low2half(sum2[j]) + __high2half(sum2[j]);
- sum += mask ? slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);
-
- if (ncols == 1) {
- kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
- } else {
- kqmax_new_arr[j] = ggml_cuda_hmax(kqmax_new_arr[j], sum);
- }
-
- if (threadIdx.x == 0) {
- KQ[j*D + i_KQ] = sum;
- }
- }
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j];
-
- kqmax_new_j = warp_reduce_max(kqmax_new_j);
- if (threadIdx.x == 0) {
- kqmax_shared[j][threadIdx.y] = kqmax_new_j;
- }
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- half kqmax_new_j = kqmax_shared[j][threadIdx.x];
- kqmax_new_j = warp_reduce_max(kqmax_new_j);
-
- const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j);
- kqmax[j] = kqmax_new_j;
-
- const half val = hexp(KQ[j*D + tid] - kqmax[j]);
- kqsum[j] = kqsum[j]*KQ_max_scale + val;
- KQ[j*D + tid] = val;
-
- VKQ[j] *= __half2half2(KQ_max_scale);
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int k0 = 0; k0 < D; k0 += 2) {
- if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) {
- break;
- }
-
- half2 V_k;
- reinterpret_cast<half&>(V_k.x) = V_h[(k_VKQ_0 + k0 + 0)*stride_KV + tid];
- reinterpret_cast<half&>(V_k.y) = V_h[(k_VKQ_0 + k0 + 1)*stride_KV + tid];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- VKQ[j] += V_k*KQ2[j*(D/2) + k0/2];
- }
- }
-
- __syncthreads();
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqsum[j] = warp_reduce_sum(kqsum[j]);
- if (threadIdx.x == 0) {
- kqsum_shared[j][threadIdx.y] = kqsum[j];
- }
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
- if (ncols > 2 && ic0 + j_VKQ >= ne01) {
- break;
- }
-
- kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
- kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
-
- half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ]));
- if (parallel_blocks == 1) {
- dst_val /= kqsum[j_VKQ];
- }
- const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
- dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
- }
-
- if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) {
- dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]);
- }
-#else
- NO_DEVICE_CODE;
-#endif // FP16_AVAILABLE
-}
-
-void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- ggml_tensor * KQV = dst;
- ggml_tensor * Q = dst->src[0];
-
- const int32_t precision = KQV->op_params[2];
- GGML_ASSERT(precision == GGML_PREC_DEFAULT);
-
- constexpr int cols_per_block = 1;
- constexpr int parallel_blocks = 4;
- switch (Q->ne[0]) {
- case 64: {
- constexpr int D = 64;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- case 128: {
- constexpr int D = 128;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- case 256: {
- constexpr int D = 256;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- default:
- GGML_ASSERT(false);
- break;
- }
-}
-
-template <int cols_per_block, int parallel_blocks>
-void launch_fattn_vec_f16_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- const ggml_tensor * Q = dst->src[0];
- switch (Q->ne[0]) {
- case 64: {
- constexpr int D = 64;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- case 128: {
- constexpr int D = 128;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- default: {
- GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
- } break;
- }
-}
-
-void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- const ggml_tensor * KQV = dst;
- const ggml_tensor * Q = dst->src[0];
-
- const int32_t precision = KQV->op_params[2];
- GGML_ASSERT(precision == GGML_PREC_DEFAULT);
-
- if (Q->ne[1] == 1) {
- ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
- return;
- }
-
- if (Q->ne[1] == 2) {
- constexpr int cols_per_block = 2;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f16_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- if (Q->ne[1] <= 4) {
- constexpr int cols_per_block = 4;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f16_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- if (Q->ne[1] <= 8) {
- constexpr int cols_per_block = 8;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f16_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- constexpr int cols_per_block = 8;
- constexpr int parallel_blocks = 1;
- launch_fattn_vec_f16_64_128<cols_per_block, parallel_blocks>(ctx, dst);
-}
diff --git a/ggml-cuda/fattn-vec-f16.cuh b/ggml-cuda/fattn-vec-f16.cuh
index c7023610a..9e1aa2c6b 100644
--- a/ggml-cuda/fattn-vec-f16.cuh
+++ b/ggml-cuda/fattn-vec-f16.cuh
@@ -1,5 +1,397 @@
#include "common.cuh"
+#include "fattn-common.cuh"
-void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+template<int D, int ncols, int parallel_blocks, ggml_type type_K, ggml_type type_V> // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+static __global__ void flash_attn_vec_ext_f16(
+ const char * __restrict__ Q,
+ const char * __restrict__ K,
+ const char * __restrict__ V,
+ const char * __restrict__ mask,
+ float * __restrict__ dst,
+ float2 * __restrict__ dst_meta,
+ const float scale,
+ const float max_bias,
+ const float m0,
+ const float m1,
+ const uint32_t n_head_log2,
+ const int ne00,
+ const int ne01,
+ const int ne02,
+ const int ne03,
+ const int ne10,
+ const int ne11,
+ const int ne12,
+ const int ne13,
+ const int ne31,
+ const int nb31,
+ const int nb01,
+ const int nb02,
+ const int nb03,
+ const int nb11,
+ const int nb12,
+ const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
+ const int ne0,
+ const int ne1,
+ const int ne2,
+ const int ne3) {
+#if FP16_AVAILABLE
+ //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
-void ggml_cuda_flash_attn_ext_vec_f16_no_mma(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+ constexpr vec_dot_KQ_f16_t vec_dot_KQ = get_vec_dot_KQ_f16<D>(type_K);
+ constexpr bool Q_q8_1 = type_K != GGML_TYPE_F16;
+ constexpr dequantize_1_f16_t dequantize_1_v = get_dequantize_1_f16(type_V);
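+ // K and V types are template parameters: the KQ dot product and the V dequantization routine are
+ // selected at compile time, and Q is pre-quantized to q8_1 below whenever K is not F16.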
+
+ const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
+ const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
+
+ const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
+ Q += nb02* blockIdx.y + nb01*ic0;
+ K += nb12*(blockIdx.y / gqa_ratio);
+ V += nb22*(blockIdx.y / gqa_ratio);
+
+ const half * maskh = (const half *) mask + ne11*ic0;
+
+ const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
+ const half slopeh = __float2half(slopef);
+
+ static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
+ constexpr int nwarps = D / WARP_SIZE;
+ const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
+ __builtin_assume(tid < D);
+
+ __shared__ half KQ[ncols*D];
+ half2 * KQ2 = (half2 *) KQ;
+
+ half kqmax[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax[j] = -HALF_MAX_HALF;
+ }
+ half kqsum[ncols] = {0.0f};
+
+ __shared__ half kqmax_shared[ncols][WARP_SIZE];
+ __shared__ half kqsum_shared[ncols][WARP_SIZE];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ if (threadIdx.y == 0) {
+ kqmax_shared[j][threadIdx.x] = -HALF_MAX_HALF;
+ kqsum_shared[j][threadIdx.x] = 0.0f;
+ }
+ }
+ __syncthreads();
+
+ // Convert Q to half2 (f16 K) or q8_1 (quantized K) and store in registers:
+ half2 Q_h2[ncols][D/(2*WARP_SIZE)];
+ int Q_i32[ncols][D/(sizeof(int)*QK8_1) == 0 ? 1 : D/(sizeof(int)*QK8_1)];
+ half2 Q_ds[ncols][D/QK8_1 == 0 ? 1 : D/QK8_1];
+ if (Q_q8_1) {
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+
+ if (j0 + nwarps > ncols && j >= ncols) {
+ break;
+ }
+
+ // Reuse KQ as temporary storage for converting Q to q8_1:
+ int * tmp_q_i32 = (int *) &KQ[j*D];
+ half2 * tmp_q_ds = (half2 *) (tmp_q_i32 + D/sizeof(int));
+
+ // Set memory to zero if out of bounds:
+ if (ncols > 2 && ic0 + j >= ne01) {
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ tmp_q_i32[i] = 0;
+ }
+ if (threadIdx.x < D/QK8_1) {
+ tmp_q_ds[threadIdx.x] = make_half2(0.0f, 0.0f);
+ }
+ continue;
+ }
+
+ const float * Q_f = (const float *) (Q + j*nb01);
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ quantize_q8_1_to_shared(Q_f + 4*i0, scale, tmp_q_i32, tmp_q_ds);
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ int * tmp_q_i32 = (int *) &KQ[j*D];
+ half2 * tmp_q_ds = (half2 *) (tmp_q_i32 + D/sizeof(int));
+
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ Q_i32[j][i0/WARP_SIZE] = tmp_q_i32[i];
+ Q_ds[j][i0/WARP_SIZE] = tmp_q_ds[i/QI8_1];
+ }
+ }
+
+ __syncthreads();
+ } else {
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ const float2 * Q_f2_j = (const float2 *) (Q + j*nb01);
+
+#pragma unroll
+ for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ const float2 tmp = ncols <= 2 || ic0 + j < ne01 ? Q_f2_j[i] : make_float2(0.0f, 0.0f);
+ Q_h2[j][i0/WARP_SIZE] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y);
+ }
+ }
+ }
+
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ KQ[j*D + tid] = -HALF_MAX_HALF;
+ }
+
+ half2 VKQ[ncols] = {{0.0f, 0.0f}};
+
+ const int k_start = parallel_blocks == 1 ? 0 : ip*D;
+ for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
+ // Calculate KQ tile and keep track of new maximum KQ values:
+
+ // For unknown reasons using a half array of size 1 for kqmax_new causes a performance regression,
+ // see https://github.com/ggerganov/llama.cpp/pull/7061 .
+ // Therefore this variable is defined twice but only used once (so that the compiler can optimize out the unused variable).
+ half kqmax_new = kqmax[0];
+ half kqmax_new_arr[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax_new_arr[j] = kqmax[j];
+ }
+
+#pragma unroll
+ for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
+ const int i_KQ = i_KQ_0 + threadIdx.y;
+
+ if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
+ break;
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ half sum = vec_dot_KQ(K + (k_VKQ_0 + i_KQ)*nb11, Q_h2[j], Q_i32[j], Q_ds[j]);
+ sum = warp_reduce_sum(sum);
+ sum += mask ? slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);
+
+ if (ncols == 1) {
+ kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
+ } else {
+ kqmax_new_arr[j] = ggml_cuda_hmax(kqmax_new_arr[j], sum);
+ }
+
+ if (threadIdx.x == 0) {
+ KQ[j*D + i_KQ] = sum;
+ }
+ }
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ half kqmax_new_j = ncols == 1 ? kqmax_new : kqmax_new_arr[j];
+
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+ if (threadIdx.x == 0) {
+ kqmax_shared[j][threadIdx.y] = kqmax_new_j;
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ half kqmax_new_j = kqmax_shared[j][threadIdx.x];
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+
+ const half KQ_max_scale = hexp(kqmax[j] - kqmax_new_j);
+ kqmax[j] = kqmax_new_j;
+
+ const half val = hexp(KQ[j*D + tid] - kqmax[j]);
+ kqsum[j] = kqsum[j]*KQ_max_scale + val;
+ KQ[j*D + tid] = val;
+
+ VKQ[j] *= __half2half2(KQ_max_scale);
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int k0 = 0; k0 < D; k0 += 2) {
+ if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k0 >= ne11) {
+ break;
+ }
+
+ half2 V_k;
+ reinterpret_cast<half&>(V_k.x) = dequantize_1_v(V + (k_VKQ_0 + k0 + 0)*nb21, tid);
+ reinterpret_cast<half&>(V_k.y) = dequantize_1_v(V + (k_VKQ_0 + k0 + 1)*nb21, tid);
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ VKQ[j] += V_k*KQ2[j*(D/2) + k0/2];
+ }
+ }
+
+ __syncthreads();
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqsum[j] = warp_reduce_sum(kqsum[j]);
+ if (threadIdx.x == 0) {
+ kqsum_shared[j][threadIdx.y] = kqsum[j];
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
+ if (ncols > 2 && ic0 + j_VKQ >= ne01) {
+ break;
+ }
+
+ kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
+ kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
+
+ half dst_val = (__low2half(VKQ[j_VKQ]) + __high2half(VKQ[j_VKQ]));
+ if (parallel_blocks == 1) {
+ dst_val /= kqsum[j_VKQ];
+ }
+ const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
+ dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
+ }
+
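+ // With parallel_blocks > 1 each block has only seen part of the KV cache, so also store the
+ // per-column running max and sum so that the partial results can be combined afterwards.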
+ if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) {
+ dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]);
+ }
+#else
+ NO_DEVICE_CODE;
+#endif // FP16_AVAILABLE
+}
+
+template <int D, int cols_per_block, int parallel_blocks, ggml_type type_K, ggml_type type_V>
+void ggml_cuda_flash_attn_ext_vec_f16_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ constexpr int nwarps = D/WARP_SIZE;
+ fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks, type_K, type_V>;
+ constexpr bool need_f16_K = D != 128;
+ constexpr bool need_f16_V = D != 128 && D != 64;
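+ // Only D == 128 has a quantized-K path and only D == 64/128 a quantized-V path (see the cases
+ // declared below); for the other head sizes the launcher is asked to provide F16 copies of K/V.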
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, need_f16_K, need_f16_V);
+}
+
+template <int D, ggml_type type_K, ggml_type type_V>
+void ggml_cuda_flash_attn_ext_vec_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ ggml_tensor * KQV = dst;
+ ggml_tensor * Q = dst->src[0];
+ ggml_tensor * K = dst->src[1];
+ ggml_tensor * V = dst->src[2];
+
+ const int32_t precision = KQV->op_params[2];
+ GGML_ASSERT(precision == GGML_PREC_DEFAULT);
+
+ GGML_ASSERT(K->type == type_K);
+ GGML_ASSERT(V->type == type_V);
+
+ if (Q->ne[1] == 1) {
+ constexpr int cols_per_block = 1;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f16_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] == 2) {
+ constexpr int cols_per_block = 2;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f16_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] <= 4) {
+ constexpr int cols_per_block = 4;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f16_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] <= 8) {
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f16_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 1;
+ ggml_cuda_flash_attn_ext_vec_f16_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+}
+
+#define DECL_FATTN_VEC_F16_CASE(D, type_K, type_V) \
+ template void ggml_cuda_flash_attn_ext_vec_f16_case \
+ <D, type_K, type_V>(ggml_backend_cuda_context & ctx, ggml_tensor * dst) \
+
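+// Explicit instantiation declarations: the corresponding definitions are expected to live in separate compilation units.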
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0);
+
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16);
+
+extern DECL_FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/fattn-vec-f32.cu b/ggml-cuda/fattn-vec-f32.cu
deleted file mode 100644
index b4652301b..000000000
--- a/ggml-cuda/fattn-vec-f32.cu
+++ /dev/null
@@ -1,279 +0,0 @@
-#include "common.cuh"
-#include "fattn-common.cuh"
-#include "fattn-vec-f32.cuh"
-
-template<int D, int ncols, int parallel_blocks> // D == head size
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-__launch_bounds__(D, 1)
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-static __global__ void flash_attn_vec_ext_f32(
- const char * __restrict__ Q,
- const char * __restrict__ K,
- const char * __restrict__ V,
- const char * __restrict__ mask,
- float * __restrict__ dst,
- float2 * __restrict__ dst_meta,
- const float scale,
- const float max_bias,
- const float m0,
- const float m1,
- const uint32_t n_head_log2,
- const int ne00,
- const int ne01,
- const int ne02,
- const int ne03,
- const int ne10,
- const int ne11,
- const int ne12,
- const int ne13,
- const int ne31,
- const int nb31,
- const int nb01,
- const int nb02,
- const int nb03,
- const int nb11,
- const int nb12,
- const int nb13,
- const int ne0,
- const int ne1,
- const int ne2,
- const int ne3) {
- //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
-
- const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
- const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
-
- const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
- const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0);
- const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio));
- const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
- const half * maskh = (const half *) mask + ne11*ic0;
-
- const int stride_KV = nb11 / sizeof(half);
- const int stride_KV2 = nb11 / sizeof(half2);
-
- const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
-
- static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
- constexpr int nwarps = D / WARP_SIZE;
- const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
- __builtin_assume(tid < D);
-
- __shared__ float KQ[ncols*D];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- KQ[j*D + tid] = -FLT_MAX/2.0f;
- }
-
- float kqmax[ncols];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqmax[j] = -FLT_MAX/2.0f;
- }
- float kqsum[ncols] = {0.0f};
-
- __shared__ float kqmax_shared[ncols][WARP_SIZE];
- __shared__ float kqsum_shared[ncols][WARP_SIZE];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- if (threadIdx.y == 0) {
- kqmax_shared[j][threadIdx.x] = -FLT_MAX/2.0f;
- kqsum_shared[j][threadIdx.x] = 0.0f;
- }
- }
- __syncthreads();
-
- // Convert Q to half2 and store in registers:
- float2 Q_h2[ncols][D/(2*WARP_SIZE)];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
-#pragma unroll
- for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
-
- Q_h2[j][i0/WARP_SIZE] = ncols <= 2 || ic0 + j < ne01 ? Q_f2[j*(nb01/sizeof(float2)) + i] : make_float2(0.0f, 0.0f);
- Q_h2[j][i0/WARP_SIZE].x *= scale;
- Q_h2[j][i0/WARP_SIZE].y *= scale;
- }
- }
-
- float VKQ[ncols] = {0.0f};
-
- const int k_start = parallel_blocks == 1 ? 0 : ip*D;
- for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
- // Calculate KQ tile and keep track of new maximum KQ values:
-
- float kqmax_new_arr[ncols];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqmax_new_arr[j] = kqmax[j];
- }
-
-#pragma unroll
- for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
- const int i_KQ = i_KQ_0 + threadIdx.y;
-
- if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
- break;
- }
-
- float sum[ncols] = {0.0f};
-#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) {
- const int k_KQ = k_KQ_0 + threadIdx.x;
-
- const half2 K_ik = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ];
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- sum[j] += __low2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].x;
- sum[j] += __high2float(K_ik) * Q_h2[j][k_KQ_0/WARP_SIZE].y;
- }
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- sum[j] = warp_reduce_sum(sum[j]);
- sum[j] += mask ? slope*__half2float(maskh[j*ne11 + k_VKQ_0 + i_KQ]) : 0.0f;
-
- kqmax_new_arr[j] = fmaxf(kqmax_new_arr[j], sum[j]);
-
- if (threadIdx.x == 0) {
- KQ[j*D + i_KQ] = sum[j];
- }
- }
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- float kqmax_new_j = kqmax_new_arr[j];
-
- kqmax_new_j = warp_reduce_max(kqmax_new_j);
- if (threadIdx.x == 0) {
- kqmax_shared[j][threadIdx.y] = kqmax_new_j;
- }
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- float kqmax_new_j = kqmax_shared[j][threadIdx.x];
- kqmax_new_j = warp_reduce_max(kqmax_new_j);
-
- const float KQ_max_scale = expf(kqmax[j] - kqmax_new_j);
- kqmax[j] = kqmax_new_j;
-
- const float val = expf(KQ[j*D + tid] - kqmax[j]);
- kqsum[j] = kqsum[j]*KQ_max_scale + val;
- KQ[j*D + tid] = val;
-
- VKQ[j] *= KQ_max_scale;
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int k = 0; k < D; ++k) {
- if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k >= ne11) {
- break;
- }
-
- const float V_ki = __half2float(V_h[(k_VKQ_0 + k)*stride_KV + tid]);
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- VKQ[j] += V_ki*KQ[j*D + k];
- }
- }
-
- __syncthreads();
- }
-
-#pragma unroll
- for (int j = 0; j < ncols; ++j) {
- kqsum[j] = warp_reduce_sum(kqsum[j]);
- if (threadIdx.x == 0) {
- kqsum_shared[j][threadIdx.y] = kqsum[j];
- }
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
- if (ncols > 2 && ic0 + j_VKQ >= ne01) {
- break;
- }
-
- kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
- kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
-
- float dst_val = VKQ[j_VKQ];
- if (parallel_blocks == 1) {
- dst_val /= kqsum[j_VKQ];
- }
- const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
- dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
- }
-
- if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) {
- dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]);
- }
-}
-
-template <int cols_per_block, int parallel_blocks>
-void launch_fattn_vec_f32_64_128(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- const ggml_tensor * Q = dst->src[0];
- switch (Q->ne[0]) {
- case 64: {
- constexpr int D = 64;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- case 128: {
- constexpr int D = 128;
- constexpr int nwarps = D/WARP_SIZE;
- fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32<D, cols_per_block, parallel_blocks>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- } break;
- default: {
- GGML_ASSERT(false && "FlashAttention without tensor cores only supports head sizes 64 and 128.");
- } break;
- }
-}
-
-void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- const ggml_tensor * Q = dst->src[0];
-
- if (Q->ne[1] == 1) {
- constexpr int cols_per_block = 1;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f32_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- if (Q->ne[1] == 2) {
- constexpr int cols_per_block = 2;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f32_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- if (Q->ne[1] <= 4) {
- constexpr int cols_per_block = 4;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f32_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- if (Q->ne[1] <= 8) {
- constexpr int cols_per_block = 8;
- constexpr int parallel_blocks = 4;
- launch_fattn_vec_f32_64_128<cols_per_block, parallel_blocks>(ctx, dst);
- return;
- }
-
- constexpr int cols_per_block = 8;
- constexpr int parallel_blocks = 1;
- launch_fattn_vec_f32_64_128<cols_per_block, parallel_blocks>(ctx, dst);
-}
diff --git a/ggml-cuda/fattn-vec-f32.cuh b/ggml-cuda/fattn-vec-f32.cuh
index 614d54ae3..ce23a4ebd 100644
--- a/ggml-cuda/fattn-vec-f32.cuh
+++ b/ggml-cuda/fattn-vec-f32.cuh
@@ -1,3 +1,378 @@
#include "common.cuh"
+#include "fattn-common.cuh"
-void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+template<int D, int ncols, int parallel_blocks, ggml_type type_K, ggml_type type_V> // D == head size
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(D, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+static __global__ void flash_attn_vec_ext_f32(
+ const char * __restrict__ Q,
+ const char * __restrict__ K,
+ const char * __restrict__ V,
+ const char * __restrict__ mask,
+ float * __restrict__ dst,
+ float2 * __restrict__ dst_meta,
+ const float scale,
+ const float max_bias,
+ const float m0,
+ const float m1,
+ const uint32_t n_head_log2,
+ const int ne00,
+ const int ne01,
+ const int ne02,
+ const int ne03,
+ const int ne10,
+ const int ne11,
+ const int ne12,
+ const int ne13,
+ const int ne31,
+ const int nb31,
+ const int nb01,
+ const int nb02,
+ const int nb03,
+ const int nb11,
+ const int nb12,
+ const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
+ const int ne0,
+ const int ne1,
+ const int ne2,
+ const int ne3) {
+ //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
+
+ constexpr vec_dot_KQ_f32_t vec_dot_KQ = get_vec_dot_KQ_f32<D>(type_K);
+ constexpr bool Q_q8_1 = type_K != GGML_TYPE_F16;
+ constexpr dequantize_1_f32_t dequantize_1_v = get_dequantize_1_f32(type_V);
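+ // Same compile-time dispatch as the f16 kernel: a non-F16 K implies q8_1-quantized Q, and
+ // dequantize_1_v handles whichever V type was instantiated.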
+
+ const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on.
+ const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
+
+ const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
+ Q += nb02* blockIdx.y + nb01*ic0;
+ K += nb12*(blockIdx.y / gqa_ratio);
+ V += nb22*(blockIdx.y / gqa_ratio); // K and V have same shape
+ const half * maskh = (const half *) mask + ne11*ic0;
+
+ const float slope = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
+
+ static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
+ constexpr int nwarps = D / WARP_SIZE;
+ const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
+ __builtin_assume(tid < D);
+
+ __shared__ float KQ[ncols*D];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ KQ[j*D + tid] = -FLT_MAX/2.0f;
+ }
+
+ float kqmax[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax[j] = -FLT_MAX/2.0f;
+ }
+ float kqsum[ncols] = {0.0f};
+
+ __shared__ float kqmax_shared[ncols][WARP_SIZE];
+ __shared__ float kqsum_shared[ncols][WARP_SIZE];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ if (threadIdx.y == 0) {
+ kqmax_shared[j][threadIdx.x] = -FLT_MAX/2.0f;
+ kqsum_shared[j][threadIdx.x] = 0.0f;
+ }
+ }
+ __syncthreads();
+
+ // Convert Q to float2 (f16 K) or q8_1 (quantized K) and store in registers:
+ float2 Q_f2[ncols][D/(2*WARP_SIZE)];
+ int Q_i32[ncols][D/(sizeof(int)*QK8_1) == 0 ? 1 : D/(sizeof(int)*QK8_1)];
+ float2 Q_ds[ncols][D/QK8_1 == 0 ? 1 : D/QK8_1];
+ if (Q_q8_1) {
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+
+ if (j0 + nwarps > ncols && j >= ncols) {
+ break;
+ }
+
+ // Reuse KQ as temporary storage for converting Q to q8_1:
+ int * tmp_q_i32 = (int *) &KQ[j*D];
+ float2 * tmp_q_ds = (float2 *) (tmp_q_i32 + D/sizeof(int));
+
+ // Set memory to zero if out of bounds:
+ if (ncols > 2 && ic0 + j >= ne01) {
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ tmp_q_i32[i] = 0;
+ }
+ if (threadIdx.x < D/QK8_1) {
+ tmp_q_ds[threadIdx.x] = make_float2(0.0f, 0.0f);
+ }
+ continue;
+ }
+
+ const float * Q_f = (const float *) (Q + j*nb01);
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ quantize_q8_1_to_shared(Q_f + 4*i0, scale, tmp_q_i32, tmp_q_ds);
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ int * tmp_q_i32 = (int *) &KQ[j*D];
+ float2 * tmp_q_ds = (float2 *) (tmp_q_i32 + D/sizeof(int));
+
+#pragma unroll
+ for (int i0 = 0; i0 < D/sizeof(int); i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ Q_i32[j][i0/WARP_SIZE] = tmp_q_i32[i];
+ Q_ds[j][i0/WARP_SIZE] = tmp_q_ds[i/QI8_1];
+ }
+ }
+
+ __syncthreads();
+ } else {
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ const float2 * Q_f2_j = (const float2 *) (Q + j*nb01);
+#pragma unroll
+ for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+
+ Q_f2[j][i0/WARP_SIZE] = ncols <= 2 || ic0 + j < ne01 ? Q_f2_j[i] : make_float2(0.0f, 0.0f);
+ Q_f2[j][i0/WARP_SIZE].x *= scale;
+ Q_f2[j][i0/WARP_SIZE].y *= scale;
+ }
+ }
+ }
+
+ float VKQ[ncols] = {0.0f};
+
+ const int k_start = parallel_blocks == 1 ? 0 : ip*D;
+ for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*D) {
+ // Calculate KQ tile and keep track of new maximum KQ values:
+
+ float kqmax_new_arr[ncols];
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqmax_new_arr[j] = kqmax[j];
+ }
+
+#pragma unroll
+ for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += nwarps) {
+ const int i_KQ = i_KQ_0 + threadIdx.y;
+
+ if ((i_KQ_0 + nwarps > D && i_KQ >= D) || (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + i_KQ >= ne11)) {
+ break;
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ float sum = vec_dot_KQ(K + (k_VKQ_0 + i_KQ)*nb11, Q_f2[j], Q_i32[j], Q_ds[j]);
+ sum = warp_reduce_sum(sum);
+ sum += mask ? slope*__half2float(maskh[j*ne11 + k_VKQ_0 + i_KQ]) : 0.0f;
+
+ kqmax_new_arr[j] = fmaxf(kqmax_new_arr[j], sum);
+
+ if (threadIdx.x == 0) {
+ KQ[j*D + i_KQ] = sum;
+ }
+ }
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ float kqmax_new_j = kqmax_new_arr[j];
+
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+ if (threadIdx.x == 0) {
+ kqmax_shared[j][threadIdx.y] = kqmax_new_j;
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ float kqmax_new_j = kqmax_shared[j][threadIdx.x];
+ kqmax_new_j = warp_reduce_max(kqmax_new_j);
+
+ const float KQ_max_scale = expf(kqmax[j] - kqmax_new_j);
+ kqmax[j] = kqmax_new_j;
+
+ const float val = expf(KQ[j*D + tid] - kqmax[j]);
+ kqsum[j] = kqsum[j]*KQ_max_scale + val;
+ KQ[j*D + tid] = val;
+
+ VKQ[j] *= KQ_max_scale;
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int k = 0; k < D; ++k) {
+ if (FATTN_KQ_STRIDE % D != 0 && k_VKQ_0 + k >= ne11) {
+ break;
+ }
+
+ const float V_ki = dequantize_1_v(V + (k_VKQ_0 + k)*nb21, tid);
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ VKQ[j] += V_ki*KQ[j*D + k];
+ }
+ }
+
+ __syncthreads();
+ }
+
+#pragma unroll
+ for (int j = 0; j < ncols; ++j) {
+ kqsum[j] = warp_reduce_sum(kqsum[j]);
+ if (threadIdx.x == 0) {
+ kqsum_shared[j][threadIdx.y] = kqsum[j];
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j_VKQ = 0; j_VKQ < ncols; ++j_VKQ) {
+ if (ncols > 2 && ic0 + j_VKQ >= ne01) {
+ break;
+ }
+
+ kqsum[j_VKQ] = kqsum_shared[j_VKQ][threadIdx.x];
+ kqsum[j_VKQ] = warp_reduce_sum(kqsum[j_VKQ]);
+
+ float dst_val = VKQ[j_VKQ];
+ if (parallel_blocks == 1) {
+ dst_val /= kqsum[j_VKQ];
+ }
+ const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
+ dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val;
+ }
+
+ if (parallel_blocks != 1 && tid < ncols && (ncols <= 2 || ic0 + tid < ne01)) {
+ dst_meta[(ic0 + tid)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[tid], kqsum[tid]);
+ }
+}
+
+template <int D, int cols_per_block, int parallel_blocks, ggml_type type_K, ggml_type type_V>
+void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ constexpr int nwarps = D/WARP_SIZE;
+ fattn_kernel_t fattn_kernel = flash_attn_vec_ext_f32<D, cols_per_block, parallel_blocks, type_K, type_V>;
+ constexpr bool need_f16_K = D != 128;
+ constexpr bool need_f16_V = D != 128 && D != 64;
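+ // As in the f16 path, head sizes other than 64/128 rely on the launcher providing F16 K/V.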
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, need_f16_K, need_f16_V);
+}
+
+template <int D, ggml_type type_K, ggml_type type_V>
+void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ ggml_tensor * KQV = dst;
+ ggml_tensor * Q = dst->src[0];
+ ggml_tensor * K = dst->src[1];
+ ggml_tensor * V = dst->src[2];
+
+ const int32_t precision = KQV->op_params[2];
+ GGML_ASSERT(precision == GGML_PREC_DEFAULT);
+
+ GGML_ASSERT(K->type == type_K);
+ GGML_ASSERT(V->type == type_V);
+
+ if (Q->ne[1] == 1) {
+ constexpr int cols_per_block = 1;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f32_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] == 2) {
+ constexpr int cols_per_block = 2;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f32_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] <= 4) {
+ constexpr int cols_per_block = 4;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f32_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ if (Q->ne[1] <= 8) {
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 4;
+ ggml_cuda_flash_attn_ext_vec_f32_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+ return;
+ }
+
+ constexpr int cols_per_block = 8;
+ constexpr int parallel_blocks = 1;
+ ggml_cuda_flash_attn_ext_vec_f32_case_impl<D, cols_per_block, parallel_blocks, type_K, type_V>(ctx, dst);
+}
+
+#define DECL_FATTN_VEC_F32_CASE(D, type_K, type_V) \
+ template void ggml_cuda_flash_attn_ext_vec_f32_case \
+ <D, type_K, type_V>(ggml_backend_cuda_context & ctx, ggml_tensor * dst) \
+
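+// Explicit instantiation declarations, mirroring the f16 header; definitions live in separate compilation units.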
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0);
+
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16);
+extern DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16);
+
+extern DECL_FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/fattn-wmma-f16.cuh b/ggml-cuda/fattn-wmma-f16.cuh
new file mode 100644
index 000000000..59cd30d78
--- /dev/null
+++ b/ggml-cuda/fattn-wmma-f16.cuh
@@ -0,0 +1,490 @@
+#include "common.cuh"
+#include "fattn-common.cuh"
+
+#if FP16_MMA_AVAILABLE
+#include <mma.h>
+#endif
+
+// D == head size, VKQ_stride == num VKQ rows calculated in parallel:
+template<int D, int ncols, int nwarps, int VKQ_stride, int parallel_blocks, typename KQ_acc_t>
+#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+__launch_bounds__(nwarps*WARP_SIZE, 1)
+#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
+static __global__ void flash_attn_ext_f16(
+ const char * __restrict__ Q,
+ const char * __restrict__ K,
+ const char * __restrict__ V,
+ const char * __restrict__ mask,
+ float * __restrict__ dst,
+ float2 * __restrict__ dst_meta,
+ const float scale,
+ const float max_bias,
+ const float m0,
+ const float m1,
+ const uint32_t n_head_log2,
+ const int ne00,
+ const int ne01,
+ const int ne02,
+ const int ne03,
+ const int ne10,
+ const int ne11,
+ const int ne12,
+ const int ne13,
+ const int ne31,
+ const int nb31,
+ const int nb01,
+ const int nb02,
+ const int nb03,
+ const int nb11,
+ const int nb12,
+ const int nb13,
+ const int nb21,
+ const int nb22,
+ const int nb23,
+ const int ne0,
+ const int ne1,
+ const int ne2,
+ const int ne3) {
+#if FP16_MMA_AVAILABLE
+ //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
+
+ const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on.
+ const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
+
+ static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE.");
+ static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16.");
+ constexpr int frag_m = ncols == 8 ? 32 : 16;
+ constexpr int frag_n = ncols == 8 ? 8 : 16;
+ static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0.");
+ typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, frag_m, frag_n, 16, half, nvcuda::wmma::row_major> frag_a_K;
+ typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_a, frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_a_V;
+ typedef nvcuda::wmma::fragment<nvcuda::wmma::matrix_b, frag_m, frag_n, 16, half, nvcuda::wmma::col_major> frag_b;
+ typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, KQ_acc_t> frag_c_KQ;
+ typedef nvcuda::wmma::fragment<nvcuda::wmma::accumulator, frag_m, frag_n, 16, half> frag_c_VKQ;
+
+ constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel.
+ constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy.
+ static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps.");
+
+ // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts:
+ constexpr int D_padded = D + 8;
+ constexpr int kqs_padded = FATTN_KQ_STRIDE + 8;
+ constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half);
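+ // kqar == number of half values occupied by one KQ accumulator (2 for float accumulators, 1 for half).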
+
+ const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
+ const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0);
+ const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio));
+ const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
+ const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0;
+ const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2);
+
+ const int stride_Q = nb01 / sizeof(float);
+ const int stride_KV = nb11 / sizeof(half);
+
+ const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
+ const half slopeh = __float2half(slopef);
+ const half2 slope2 = make_half2(slopef, slopef);
+
+ frag_b Q_b[D/16][ncols/frag_n];
+
+ // A single buffer for temporarily holding tiles of KQ and VKQ parts:
+ constexpr int mem_KQ = ncols*kqs_padded*kqar;
+ constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded;
+ __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts];
+ float * KQ_f = (float *) KQ;
+ half2 * KQ2 = (half2 *) KQ;
+
+ float KQ_rowsum_f[ncols/nwarps] = {0.0f};
+ float KQ_max_f[ncols/nwarps];
+ float KQ_max_scale_f[ncols/nwarps] = {0.0f};
+
+#pragma unroll
+ for (int j = 0; j < ncols/nwarps; ++j) {
+ KQ_max_f[j] = -FLT_MAX/2.0f;
+ }
+
+ half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}};
+ half2 KQ_max_h2[ncols/nwarps];
+ half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}};
+
+#pragma unroll
+ for (int j = 0; j < ncols/nwarps; ++j) {
+ KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF);
+ }
+
+ __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice.
+ half2 * VKQ2 = (half2 *) VKQ;
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+#pragma unroll
+ for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+ if (i0 + WARP_SIZE > D/2 && i >= D/2) {
+ break;
+ }
+ VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f);
+ }
+ }
+
+ // Convert Q to half and apply scale, temporarily store in KQ:
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+#pragma unroll
+ for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+ if (i0 + WARP_SIZE > D && i >= D) {
+ break;
+ }
+ KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f;
+ }
+ }
+
+ __syncthreads();
+
+ // Load Q into tensor core fragments/registers since it will be used frequently:
+#pragma unroll
+ for (int i0 = 0; i0 < D; i0 += 16) {
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += frag_n) {
+ nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded);
+ }
+ }
+
+ __syncthreads();
+
+ // Iterate over ne11 == previous tokens:
+ for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) {
+ // Calculate tile of KQ:
+#pragma unroll
+ for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) {
+ frag_c_KQ KQ_c[ncols/frag_n];
+#pragma unroll
+ for (int j = 0; j < ncols/frag_n; ++j) {
+ nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f);
+ }
+#pragma unroll
+ for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) {
+ frag_a_K K_a;
+ nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV);
+#pragma unroll
+ for (int j = 0; j < ncols/frag_n; ++j) {
+ nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]);
+ }
+ }
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += frag_n) {
+ nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major);
+ }
+ }
+
+ __syncthreads();
+
+ // Calculate softmax for each KQ column using the current max. value.
+ // The divisor is stored in KQ_rowsum and will be applied at the end.
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+
+ if (std::is_same<KQ_acc_t, float>::value) {
+ float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE];
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k];
+ }
+
+ float KQ_max_new = KQ_max_f[j0/nwarps];
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
+ KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]);
+ }
+ KQ_max_new = warp_reduce_max(KQ_max_new);
+
+ const float diff = KQ_max_f[j0/nwarps] - KQ_max_new;
+ KQ_max_scale_f[j0/nwarps] = expf(diff);
+ if (diff <= SOFTMAX_FTZ_THRESHOLD) {
+ KQ_max_scale_f[j0/nwarps] = 0.0f;
+ }
+ KQ_max_f[j0/nwarps] = KQ_max_new;
+
+ float KQ_rowsum_add = 0.0f;
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps];
+ KQ_f_tmp[k0/WARP_SIZE] = expf(diff);
+ if (diff <= SOFTMAX_FTZ_THRESHOLD) {
+ KQ_f_tmp[k0/WARP_SIZE] = 0.0f;
+ }
+ KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE];
+ KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE];
+ }
+ KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
+
+ // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
+ KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add;
+ } else {
+ half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)];
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k];
+ }
+
+ half2 KQ_max_new = KQ_max_h2[j0/nwarps];
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
+ KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
+ }
+ KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
+ const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
+ KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
+ const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
+ *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask;
+ KQ_max_h2[j0/nwarps] = KQ_max_new;
+
+ half2 KQ_rowsum_add = make_half2(0.0f, 0.0f);
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
+ const int k = k0 + threadIdx.x;
+
+ const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps];
+ KQ2_tmp[k0/WARP_SIZE] = h2exp(diff);
+ const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
+ *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask;
+ KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE];
+ KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE];
+ }
+ KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
+
+ // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
+ KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add;
+ }
+ }
+
+ __syncthreads();
+
+ frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n];
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += frag_n) {
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
+ const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
+ nvcuda::wmma::load_matrix_sync(
+ KQ_b[k0/(VKQ_ratio*16)][j0/frag_n],
+ KQ + j0*(kqar*kqs_padded) + k,
+ kqar*kqs_padded);
+ }
+ }
+
+ frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n];
+#pragma unroll
+ for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) {
+#pragma unroll
+ for (int j = 0; j < ncols/frag_n; ++j) {
+ nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f);
+ }
+
+#pragma unroll
+ for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
+ const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
+
+ frag_a_V v_a;
+ nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV);
+#pragma unroll
+ for (int j = 0; j < ncols/frag_n; ++j) {
+ nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]);
+ }
+ }
+ }
+
+ __syncthreads();
+
+ const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded);
+#pragma unroll
+ for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) {
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += frag_n) {
+ nvcuda::wmma::store_matrix_sync(
+ KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio),
+ VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n],
+ D_padded, nvcuda::wmma::mem_col_major);
+ }
+ }
+
+ __syncthreads();
+
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j = j0 + threadIdx.y;
+
+ half2 VKQ_scale;
+ if (std::is_same<KQ_acc_t, float>::value) {
+ VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]);
+ } else {
+ VKQ_scale = KQ_max_scale_h2[j0/nwarps];
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+ if (i0 + WARP_SIZE > D/2 && i >= D/2) {
+ break;
+ }
+
+ half2 VKQ_add = make_half2(0.0f, 0.0f);
+#pragma unroll
+ for (int l = 0; l < VKQ_ratio; ++l) {
+ VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i];
+ }
+ VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add;
+ }
+ }
+
+ __syncthreads();
+ }
+
+#pragma unroll
+ for (int j0 = 0; j0 < ncols; j0 += nwarps) {
+ const int j_VKQ = j0 + threadIdx.y;
+ if (ic0 + j_VKQ >= ne01) {
+ return;
+ }
+ const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
+
+ float KQ_rowsum_j;
+ if (std::is_same<KQ_acc_t, float>::value) {
+ KQ_rowsum_j = KQ_rowsum_f[j0/nwarps];
+ } else {
+ KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]);
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
+ const int i = i0 + threadIdx.x;
+ if (i0 + WARP_SIZE > D && i >= D) {
+ break;
+ }
+ float dst_val = VKQ[j_VKQ*D_padded + i];
+ if (parallel_blocks == 1) {
+ dst_val /= KQ_rowsum_j;
+ }
+ dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val;
+ }
+
+ if (parallel_blocks == 1 || threadIdx.x != 0) {
+ continue;
+ }
+
+ float2 dst_meta_val;
+ if (std::is_same<KQ_acc_t, float>::value) {
+ dst_meta_val.x = KQ_max_f[j0/nwarps];
+ } else {
+ dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]);
+ }
+ dst_meta_val.y = KQ_rowsum_j;
+ dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val;
+ }
+#else
+ NO_DEVICE_CODE;
+#endif // FP16_MMA_AVAILABLE
+}
+
+constexpr int get_max_power_of_2(int x) {
+ return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1;
+}
+
+static_assert(get_max_power_of_2(1) == 1, "Test failed.");
+static_assert(get_max_power_of_2(2) == 2, "Test failed.");
+static_assert(get_max_power_of_2(4) == 4, "Test failed.");
+static_assert(get_max_power_of_2(6) == 2, "Test failed.");
+
+// Number of VKQ rows calculated in parallel:
+constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) {
+ return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m;
+}
+
+static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed.");
+static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed.");
+static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed.");
+static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed.");
+static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed.");
+static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed.");
+static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed.");
+static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed.");
+static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed.");
+
+template <int D, int cols_per_block, typename KQ_acc_t>
+void ggml_cuda_flash_attn_ext_wmma_f16_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * Q = dst->src[0];
+
+ constexpr int nwarps = 4;
+
+ constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16;
+ const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3];
+ const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;
+
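+ // Heuristic: if one pass would launch too few blocks to fill the SMs, split the KV cache across
+ // 4 or 2 parallel blocks per Q column and combine the partial results afterwards.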
+ if (4*blocks_num_pb1 < 2*nsm) {
+ constexpr int parallel_blocks = 4;
+ fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
+ return;
+ }
+ if (2*blocks_num_pb1 < 2*nsm) {
+ constexpr int parallel_blocks = 2;
+ fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
+ return;
+ }
+ constexpr int parallel_blocks = 1;
+ fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
+ launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block, true, true);
+}
+
+#define DECL_FATTN_WMMA_F16_CASE(D, cols_per_block, KQ_acc_t) \
+ template void ggml_cuda_flash_attn_ext_wmma_f16_case \
+ <D, cols_per_block, KQ_acc_t>(ggml_backend_cuda_context & ctx, ggml_tensor * dst) \
+
+extern DECL_FATTN_WMMA_F16_CASE( 64, 16, float);
+extern DECL_FATTN_WMMA_F16_CASE( 80, 16, float);
+extern DECL_FATTN_WMMA_F16_CASE( 96, 16, float);
+extern DECL_FATTN_WMMA_F16_CASE(112, 16, float);
+extern DECL_FATTN_WMMA_F16_CASE(128, 16, float);
+extern DECL_FATTN_WMMA_F16_CASE(256, 16, float);
+
+extern DECL_FATTN_WMMA_F16_CASE( 64, 32, float);
+extern DECL_FATTN_WMMA_F16_CASE( 80, 32, float);
+extern DECL_FATTN_WMMA_F16_CASE( 96, 32, float);
+extern DECL_FATTN_WMMA_F16_CASE(112, 32, float);
+extern DECL_FATTN_WMMA_F16_CASE(128, 32, float);
+// extern DECL_FATTN_WMMA_F16_CASE(256, 16, float);
+
+extern DECL_FATTN_WMMA_F16_CASE( 64, 8, half);
+extern DECL_FATTN_WMMA_F16_CASE( 96, 8, half);
+extern DECL_FATTN_WMMA_F16_CASE(128, 8, half);
+extern DECL_FATTN_WMMA_F16_CASE(256, 8, half);
+
+extern DECL_FATTN_WMMA_F16_CASE( 64, 16, half);
+extern DECL_FATTN_WMMA_F16_CASE( 80, 16, half);
+extern DECL_FATTN_WMMA_F16_CASE( 96, 16, half);
+extern DECL_FATTN_WMMA_F16_CASE(112, 16, half);
+extern DECL_FATTN_WMMA_F16_CASE(128, 16, half);
+extern DECL_FATTN_WMMA_F16_CASE(256, 16, half);
+
+extern DECL_FATTN_WMMA_F16_CASE( 64, 32, half);
+extern DECL_FATTN_WMMA_F16_CASE( 80, 32, half);
+extern DECL_FATTN_WMMA_F16_CASE( 96, 32, half);
+extern DECL_FATTN_WMMA_F16_CASE(112, 32, half);
+extern DECL_FATTN_WMMA_F16_CASE(128, 32, half);
+extern DECL_FATTN_WMMA_F16_CASE(256, 16, half);
diff --git a/ggml-cuda/fattn.cu b/ggml-cuda/fattn.cu
index af7c95232..38d30b210 100644
--- a/ggml-cuda/fattn.cu
+++ b/ggml-cuda/fattn.cu
@@ -4,454 +4,295 @@
#include "fattn-tile-f32.cuh"
#include "fattn-vec-f16.cuh"
#include "fattn-vec-f32.cuh"
+#include "fattn-wmma-f16.cuh"
#include "fattn.cuh"
 #include <cstdint>
-#if FP16_MMA_AVAILABLE
-#include <mma.h>
-#endif
+static void ggml_cuda_flash_attn_ext_wmma_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * KQV = dst;
+ const ggml_tensor * Q = dst->src[0];
-// D == head size, VKQ_stride == num VKQ rows calculated in parallel:
-template<int D, int ncols, int nwarps, int VKQ_stride, int parallel_blocks, typename KQ_acc_t>
-#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-__launch_bounds__(nwarps*WARP_SIZE, 1)
-#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
-static __global__ void flash_attn_ext_f16(
- const char * __restrict__ Q,
- const char * __restrict__ K,
- const char * __restrict__ V,
- const char * __restrict__ mask,
- float * __restrict__ dst,
- float2 * __restrict__ dst_meta,
- const float scale,
- const float max_bias,
- const float m0,
- const float m1,
- const uint32_t n_head_log2,
- const int ne00,
- const int ne01,
- const int ne02,
- const int ne03,
- const int ne10,
- const int ne11,
- const int ne12,
- const int ne13,
- const int ne31,
- const int nb31,
- const int nb01,
- const int nb02,
- const int nb03,
- const int nb11,
- const int nb12,
- const int nb13,
- const int ne0,
- const int ne1,
- const int ne2,
- const int ne3) {
-#if FP16_MMA_AVAILABLE
- //In this kernel Q, K, V are matrices while i, j, k are matrix indices.
+ const int32_t precision = KQV->op_params[2];
- const int ic0 = ncols*(blockIdx.x / parallel_blocks); // Index of the first Q/QKV column to work on.
- const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel.
-
- static_assert(D <= FATTN_KQ_STRIDE, "D must be <= FATTN_KQ_STRIDE.");
- static_assert(ncols == 8 || ncols % 16 == 0, "ncols must be 8 or a multiple of 16.");
- constexpr int frag_m = ncols == 8 ? 32 : 16;
- constexpr int frag_n = ncols == 8 ? 8 : 16;
- static_assert(D % frag_m == 0, "If ncols == 8 then D % frag_m must be 0.");
- typedef nvcuda::wmma::fragment frag_a_K;
- typedef nvcuda::wmma::fragment frag_a_V;
- typedef nvcuda::wmma::fragment frag_b;
- typedef nvcuda::wmma::fragment frag_c_KQ;
- typedef nvcuda::wmma::fragment frag_c_VKQ;
-
- constexpr int KQ_stride_tc = nwarps*frag_m; // Number of KQ rows calculated in parallel.
- constexpr int VKQ_ratio = KQ_stride_tc/VKQ_stride; // Number of parallel VKQ accumulators needed to keep all warps busy.
- static_assert(VKQ_ratio <= nwarps, "VKQ_ratio must be <= nwarps.");
-
- // Pad internal representation of KQ, KQV to reduce shared memory bank conflicts:
- constexpr int D_padded = D + 8;
- constexpr int kqs_padded = FATTN_KQ_STRIDE + 8;
- constexpr int kqar = sizeof(KQ_acc_t)/sizeof(half);
-
- const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix.
- const float * Q_f = (const float *) (Q + nb02* blockIdx.y + nb01*ic0);
- const half * K_h = (const half *) (K + nb12*(blockIdx.y / gqa_ratio));
- const half * V_h = (const half *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape
- const half * maskh = (const half *) mask + (nb31/sizeof(half))* ic0;
- const half2 * mask2 = (const half2 *) mask + (nb31/sizeof(half))*(ic0/2);
-
- const int stride_Q = nb01 / sizeof(float);
- const int stride_KV = nb11 / sizeof(half);
-
- const float slopef = get_alibi_slope(max_bias, blockIdx.y, n_head_log2, m0, m1);
- const half slopeh = __float2half(slopef);
- const half2 slope2 = make_half2(slopef, slopef);
-
- frag_b Q_b[D/16][ncols/frag_n];
-
- // A single buffer for temporarily holding tiles of KQ and VKQ parts:
- constexpr int mem_KQ = ncols*kqs_padded*kqar;
- constexpr int mem_VKQ_parts = VKQ_ratio*ncols*D_padded;
- __shared__ half KQ[mem_KQ >= mem_VKQ_parts ? mem_KQ : mem_VKQ_parts];
- float * KQ_f = (float *) KQ;
- half2 * KQ2 = (half2 *) KQ;
-
- float KQ_rowsum_f[ncols/nwarps] = {0.0f};
- float KQ_max_f[ncols/nwarps];
- float KQ_max_scale_f[ncols/nwarps] = {0.0f};
-
-#pragma unroll
- for (int j = 0; j < ncols/nwarps; ++j) {
- KQ_max_f[j] = -FLT_MAX/2.0f;
- }
-
- half2 KQ_rowsum_h2[ncols/nwarps] = {{0.0f, 0.0f}};
- half2 KQ_max_h2[ncols/nwarps];
- half2 KQ_max_scale_h2[ncols/nwarps] = {{0.0f, 0.0f}};
-
-#pragma unroll
- for (int j = 0; j < ncols/nwarps; ++j) {
- KQ_max_h2[j] = make_half2(-HALF_MAX_HALF, -HALF_MAX_HALF);
- }
-
- __shared__ half VKQ[ncols*D_padded]; // Accumulator for final VKQ slice.
- half2 * VKQ2 = (half2 *) VKQ;
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += nwarps) {
- const int j = j0 + threadIdx.y;
-#pragma unroll
- for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
- if (i0 + WARP_SIZE > D/2 && i >= D/2) {
- break;
- }
- VKQ2[j*(D_padded/2) + i] = make_half2(0.0f, 0.0f);
- }
- }
-
- // Convert Q to half and apply scale, temporarily store in KQ:
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += nwarps) {
- const int j = j0 + threadIdx.y;
-#pragma unroll
- for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
- if (i0 + WARP_SIZE > D && i >= D) {
- break;
- }
- KQ[j*D_padded + i] = ic0 + j < ne01 ? Q_f[j*stride_Q + i] * scale : 0.0f;
- }
- }
-
- __syncthreads();
-
- // Load Q into tensor core fragments/registers since it will be used frequently:
-#pragma unroll
- for (int i0 = 0; i0 < D; i0 += 16) {
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += frag_n) {
- nvcuda::wmma::load_matrix_sync(Q_b[i0/16][j0/frag_n], KQ + j0*D_padded + i0, D_padded);
- }
- }
-
- __syncthreads();
-
- // Iterate over ne11 == previous tokens:
- for (int k_VKQ_0 = ip*FATTN_KQ_STRIDE; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE) {
- // Calculate tile of KQ:
-#pragma unroll
- for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE; i_KQ_0 += KQ_stride_tc) {
- frag_c_KQ KQ_c[ncols/frag_n];
-#pragma unroll
- for (int j = 0; j < ncols/frag_n; ++j) {
- nvcuda::wmma::fill_fragment(KQ_c[j], 0.0f);
- }
-#pragma unroll
- for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 16) {
- frag_a_K K_a;
- nvcuda::wmma::load_matrix_sync(K_a, K_h + (k_VKQ_0 + i_KQ_0 + frag_m*threadIdx.y)*stride_KV + k_KQ_0, stride_KV);
-#pragma unroll
- for (int j = 0; j < ncols/frag_n; ++j) {
- nvcuda::wmma::mma_sync(KQ_c[j], K_a, Q_b[k_KQ_0/16][j], KQ_c[j]);
- }
- }
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += frag_n) {
- nvcuda::wmma::store_matrix_sync((KQ_acc_t *) KQ + j0*kqs_padded + i_KQ_0 + frag_m*threadIdx.y, KQ_c[j0/frag_n], kqs_padded, nvcuda::wmma::mem_col_major);
- }
- }
-
- __syncthreads();
-
- // Calculate softmax for each KQ column using the current max. value.
- // The divisor is stored in KQ_rowsum and will be applied at the end.
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += nwarps) {
- const int j = j0 + threadIdx.y;
-
- if (std::is_same<KQ_acc_t, float>::value) {
- float KQ_f_tmp[FATTN_KQ_STRIDE / WARP_SIZE];
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- KQ_f_tmp[k0/WARP_SIZE] = KQ_f[j*kqs_padded + k];
- }
-
- float KQ_max_new = KQ_max_f[j0/nwarps];
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
- KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]);
- }
- KQ_max_new = warp_reduce_max(KQ_max_new);
-
- const float diff = KQ_max_f[j0/nwarps] - KQ_max_new;
- KQ_max_scale_f[j0/nwarps] = expf(diff);
- if (diff <= SOFTMAX_FTZ_THRESHOLD) {
- KQ_max_scale_f[j0/nwarps] = 0.0f;
- }
- KQ_max_f[j0/nwarps] = KQ_max_new;
-
- float KQ_rowsum_add = 0.0f;
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- const float diff = KQ_f_tmp[k0/WARP_SIZE] - KQ_max_f[j0/nwarps];
- KQ_f_tmp[k0/WARP_SIZE] = expf(diff);
- if (diff <= SOFTMAX_FTZ_THRESHOLD) {
- KQ_f_tmp[k0/WARP_SIZE] = 0.0f;
- }
- KQ_rowsum_add += KQ_f_tmp[k0/WARP_SIZE];
- KQ[j*(kqar*kqs_padded) + k] = KQ_f_tmp[k0/WARP_SIZE];
- }
- KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
-
- // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
- KQ_rowsum_f[j0/nwarps] = KQ_max_scale_f[j0/nwarps]*KQ_rowsum_f[j0/nwarps] + KQ_rowsum_add;
- } else {
- half2 KQ2_tmp[FATTN_KQ_STRIDE/(2*WARP_SIZE)];
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- KQ2_tmp[k0/WARP_SIZE] = KQ2[j*(kqs_padded/2) + k];
- }
-
- half2 KQ_max_new = KQ_max_h2[j0/nwarps];
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
- KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
- }
- KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
- const half2 diff = KQ_max_h2[j0/nwarps] - KQ_max_new;
- KQ_max_scale_h2[j0/nwarps] = h2exp(diff);
- const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
- *((uint32_t *) &KQ_max_scale_h2[j0/nwarps]) &= ftz_mask;
- KQ_max_h2[j0/nwarps] = KQ_max_new;
-
- half2 KQ_rowsum_add = make_half2(0.0f, 0.0f);
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
- const int k = k0 + threadIdx.x;
-
- const half2 diff = KQ2_tmp[k0/WARP_SIZE] - KQ_max_h2[j0/nwarps];
- KQ2_tmp[k0/WARP_SIZE] = h2exp(diff);
- const uint32_t ftz_mask = __hgt2_mask(diff, make_half2(SOFTMAX_FTZ_THRESHOLD, SOFTMAX_FTZ_THRESHOLD));
- *((uint32_t *) &KQ2_tmp[k0/WARP_SIZE]) &= ftz_mask;
- KQ_rowsum_add += KQ2_tmp[k0/WARP_SIZE];
- KQ2[j*(kqs_padded/2) + k] = KQ2_tmp[k0/WARP_SIZE];
- }
- KQ_rowsum_add = warp_reduce_sum(KQ_rowsum_add);
-
- // Scale previous KQ_rowsum to account for a potential increase in KQ_max:
- KQ_rowsum_h2[j0/nwarps] = KQ_max_scale_h2[j0/nwarps]*KQ_rowsum_h2[j0/nwarps] + KQ_rowsum_add;
- }
- }
-
- __syncthreads();
-
- frag_b KQ_b[FATTN_KQ_STRIDE/(VKQ_ratio*16)][ncols/frag_n];
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += frag_n) {
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
- const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
- nvcuda::wmma::load_matrix_sync(
- KQ_b[k0/(VKQ_ratio*16)][j0/frag_n],
- KQ + j0*(kqar*kqs_padded) + k,
- kqar*kqs_padded);
- }
- }
-
- frag_c_VKQ VKQ_c[D/VKQ_stride][ncols/frag_n];
-#pragma unroll
- for (int i_VKQ_0 = 0; i_VKQ_0 < D; i_VKQ_0 += VKQ_stride) {
-#pragma unroll
- for (int j = 0; j < ncols/frag_n; ++j) {
- nvcuda::wmma::fill_fragment(VKQ_c[i_VKQ_0/VKQ_stride][j], 0.0f);
- }
-
-#pragma unroll
- for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += VKQ_ratio*16) {
- const int k = k0 + (threadIdx.y % VKQ_ratio)*16;
-
- frag_a_V v_a;
- nvcuda::wmma::load_matrix_sync(v_a, V_h + (k_VKQ_0 + k)*stride_KV + i_VKQ_0 + frag_m*(threadIdx.y/VKQ_ratio), stride_KV);
-#pragma unroll
- for (int j = 0; j < ncols/frag_n; ++j) {
- nvcuda::wmma::mma_sync(VKQ_c[i_VKQ_0/VKQ_stride][j], v_a, KQ_b[k0/(VKQ_ratio*16)][j], VKQ_c[i_VKQ_0/VKQ_stride][j]);
- }
- }
- }
-
- __syncthreads();
-
- const int offset_k = (threadIdx.y % VKQ_ratio) * (ncols*D_padded);
-#pragma unroll
- for (int i_KQ_0 = 0; i_KQ_0 < D; i_KQ_0 += VKQ_stride) {
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += frag_n) {
- nvcuda::wmma::store_matrix_sync(
- KQ + offset_k + j0*D_padded + i_KQ_0 + frag_m*(threadIdx.y/VKQ_ratio),
- VKQ_c[i_KQ_0/VKQ_stride][j0/frag_n],
- D_padded, nvcuda::wmma::mem_col_major);
- }
- }
-
- __syncthreads();
-
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += nwarps) {
- const int j = j0 + threadIdx.y;
-
- half2 VKQ_scale;
- if (std::is_same<KQ_acc_t, float>::value) {
- VKQ_scale = make_half2(KQ_max_scale_f[j0/nwarps], KQ_max_scale_f[j0/nwarps]);
- } else {
- VKQ_scale = KQ_max_scale_h2[j0/nwarps];
- }
-
-#pragma unroll
- for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
- if (i0 + WARP_SIZE > D/2 && i >= D/2) {
+ if (precision != GGML_PREC_DEFAULT) {
+ if (Q->ne[1] <= 32 || Q->ne[0] > 128) {
+ constexpr int cols_per_block = 16;
+ switch (Q->ne[0]) {
+ case 64:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst);
+ break;
+ case 80:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst);
+ break;
+ case 96:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst);
+ break;
+ case 112:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst);
+ break;
+ case 128:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst);
+ break;
+ case 256:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, float>(ctx, dst);
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ } else {
+ constexpr int cols_per_block = 32;
+ switch (Q->ne[0]) {
+ case 64:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, float>(ctx, dst);
+ break;
+ case 80:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, float>(ctx, dst);
+ break;
+ case 96:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, float>(ctx, dst);
+ break;
+ case 112:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, float>(ctx, dst);
+ break;
+ case 128:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst);
+ break;
+ // case 256:
+ // ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, float>(ctx, dst);
+ // break;
+ default:
+ GGML_ASSERT(false);
break;
- }
-
- half2 VKQ_add = make_half2(0.0f, 0.0f);
-#pragma unroll
- for (int l = 0; l < VKQ_ratio; ++l) {
- VKQ_add += KQ2[l*(ncols*D_padded/2) + j*(D_padded/2) + i];
- }
- VKQ2[j*(D_padded/2) + i] = VKQ_scale*VKQ2[j*(D_padded/2) + i] + VKQ_add;
}
}
-
- __syncthreads();
+ return;
}
-#pragma unroll
- for (int j0 = 0; j0 < ncols; j0 += nwarps) {
- const int j_VKQ = j0 + threadIdx.y;
- if (ic0 + j_VKQ >= ne01) {
- return;
- }
- const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip;
-
- float KQ_rowsum_j;
- if (std::is_same<KQ_acc_t, float>::value) {
- KQ_rowsum_j = KQ_rowsum_f[j0/nwarps];
- } else {
- KQ_rowsum_j = __low2float(KQ_rowsum_h2[j0/nwarps]) + __high2float(KQ_rowsum_h2[j0/nwarps]);
- }
-
-#pragma unroll
- for (int i0 = 0; i0 < D; i0 += WARP_SIZE) {
- const int i = i0 + threadIdx.x;
- if (i0 + WARP_SIZE > D && i >= D) {
+ if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) {
+ constexpr int cols_per_block = 8;
+ switch (Q->ne[0]) {
+ case 64:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst);
+ break;
+ case 96:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst);
+ break;
+ case 128:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst);
+ break;
+ case 256:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst);
+ break;
+ default:
+ GGML_ASSERT(false);
break;
- }
- float dst_val = VKQ[j_VKQ*D_padded + i];
- if (parallel_blocks == 1) {
- dst_val /= KQ_rowsum_j;
- }
- dst[j_dst*gridDim.y*D + blockIdx.y*D + i] = dst_val;
}
-
- if (parallel_blocks == 1 || threadIdx.x != 0) {
- continue;
- }
-
- float2 dst_meta_val;
- if (std::is_same<KQ_acc_t, float>::value) {
- dst_meta_val.x = KQ_max_f[j0/nwarps];
- } else {
- dst_meta_val.x = __low2float(KQ_max_h2[j0/nwarps]);
- }
- dst_meta_val.y = KQ_rowsum_j;
- dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = dst_meta_val;
+ return;
}
+
+ if (Q->ne[1] <= 32) {
+ constexpr int cols_per_block = 16;
+ switch (Q->ne[0]) {
+ case 64:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst);
+ break;
+ case 80:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst);
+ break;
+ case 96:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst);
+ break;
+ case 112:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst);
+ break;
+ case 128:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst);
+ break;
+ case 256:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst);
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+ return;
+ }
+
+ constexpr int cols_per_block = 32;
+ switch (Q->ne[0]) {
+ case 64:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 64, cols_per_block, half>(ctx, dst);
+ break;
+ case 80:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 80, cols_per_block, half>(ctx, dst);
+ break;
+ case 96:
+ ggml_cuda_flash_attn_ext_wmma_f16_case< 96, cols_per_block, half>(ctx, dst);
+ break;
+ case 112:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<112, cols_per_block, half>(ctx, dst);
+ break;
+ case 128:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<128, cols_per_block, half>(ctx, dst);
+ break;
+ case 256:
+ ggml_cuda_flash_attn_ext_wmma_f16_case<256, cols_per_block, half>(ctx, dst);
+ break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+}
+#define FATTN_VEC_F16_CASE(D, type_K, type_V) \
+ if (Q->ne[0] == (D) && K->type == (type_K) && V->type == (type_V)) { \
+ ggml_cuda_flash_attn_ext_vec_f16_case<D, type_K, type_V>(ctx, dst); \
+ return; \
+ } \
+
+static void ggml_cuda_flash_attn_ext_vec_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ ggml_tensor * Q = dst->src[0];
+ ggml_tensor * K = dst->src[1];
+ ggml_tensor * V = dst->src[2];
+
+#ifdef GGML_CUDA_FA_ALL_QUANTS
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16 )
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16)
+
+ FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16)
#else
- NO_DEVICE_CODE;
-#endif // FP16_MMA_AVAILABLE
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0)
+
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0)
+
+ FATTN_VEC_F16_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16)
+ FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16)
+#endif // GGML_CUDA_FA_ALL_QUANTS
+
+ on_no_fattn_vec_case(Q->ne[0]);
}
-constexpr int get_max_power_of_2(int x) {
- return x % 2 == 0 ? 2*get_max_power_of_2(x/2) : 1;
-}
+#define FATTN_VEC_F32_CASE(D, type_K, type_V) \
+ if (Q->ne[0] == (D) && K->type == (type_K) && V->type == (type_V)) { \
+ ggml_cuda_flash_attn_ext_vec_f32_case<D, type_K, type_V>(ctx, dst); \
+ return; \
+ } \
-static_assert(get_max_power_of_2(1) == 1, "Test failed.");
-static_assert(get_max_power_of_2(2) == 2, "Test failed.");
-static_assert(get_max_power_of_2(4) == 4, "Test failed.");
-static_assert(get_max_power_of_2(6) == 2, "Test failed.");
+static void ggml_cuda_flash_attn_ext_vec_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ ggml_tensor * Q = dst->src[0];
+ ggml_tensor * K = dst->src[1];
+ ggml_tensor * V = dst->src[2];
-// Number of VKQ rows calculated in parallel:
-constexpr int get_VKQ_stride(int D, int nwarps, int frag_m) {
- return (get_max_power_of_2(D/frag_m) < nwarps ? get_max_power_of_2(D/frag_m) : nwarps)*frag_m;
-}
+#ifdef GGML_CUDA_FA_ALL_QUANTS
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16)
-static_assert(get_VKQ_stride(128, 1, 32) == 32, "Test failed.");
-static_assert(get_VKQ_stride(128, 2, 32) == 64, "Test failed.");
-static_assert(get_VKQ_stride(128, 4, 32) == 128, "Test failed.");
-static_assert(get_VKQ_stride( 64, 1, 32) == 32, "Test failed.");
-static_assert(get_VKQ_stride( 64, 2, 32) == 64, "Test failed.");
-static_assert(get_VKQ_stride( 64, 4, 32) == 64, "Test failed.");
-static_assert(get_VKQ_stride( 80, 1, 16) == 16, "Test failed.");
-static_assert(get_VKQ_stride( 80, 2, 16) == 16, "Test failed.");
-static_assert(get_VKQ_stride( 80, 4, 16) == 16, "Test failed.");
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0)
-template <int D, int cols_per_block, int nwarps, typename KQ_acc_t>
-void launch_fattn_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
- const ggml_tensor * Q = dst->src[0];
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1)
- constexpr int frag_m = cols_per_block == 8 && D % 32 == 0 ? 32 : 16;
- const int blocks_num_pb1 = ((Q->ne[1] + cols_per_block - 1) / cols_per_block)*Q->ne[2]*Q->ne[3];
- const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0)
- if (4*blocks_num_pb1 < 2*nsm) {
- constexpr int parallel_blocks = 4;
- fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- return;
- }
- if (2*blocks_num_pb1 < 2*nsm) {
- constexpr int parallel_blocks = 2;
- fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
- return;
- }
- constexpr int parallel_blocks = 1;
- fattn_kernel_t fattn_kernel = flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>;
- launch_fattn<D, parallel_blocks>(ctx, dst, fattn_kernel, nwarps, cols_per_block);
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1)
+
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0)
+
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16)
+
+ FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16)
+#else
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0)
+
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0)
+
+ FATTN_VEC_F32_CASE( 64, GGML_TYPE_F16, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16)
+ FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16)
+#endif // GGML_CUDA_FA_ALL_QUANTS
+
+ on_no_fattn_vec_case(Q->ne[0]);
}
void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
@@ -464,8 +305,8 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
// On AMD the tile kernels perform poorly, use the vec kernel instead:
if (cc >= CC_OFFSET_AMD) {
- if (precision == GGML_PREC_DEFAULT) {
- ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst);
+ if (precision == GGML_PREC_DEFAULT && fast_fp16_available(cc)) {
+ ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
} else {
ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
}
@@ -483,156 +324,22 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
if (!fp16_mma_available(cc)) {
if (Q->ne[1] <= 8) {
- ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst);
+ ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
} else {
ggml_cuda_flash_attn_ext_tile_f16(ctx, dst);
}
return;
}
- if (precision != GGML_PREC_DEFAULT) {
- if (Q->ne[1] == 1 && (Q->ne[0] == 64 || Q->ne[0] == 128)) {
+ if (Q->ne[1] == 1 && Q->ne[0] % (2*WARP_SIZE) == 0) {
+ if (precision == GGML_PREC_DEFAULT) {
+ ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
+ return;
+ } else if(Q->ne[0] <= 128) {
ggml_cuda_flash_attn_ext_vec_f32(ctx, dst);
return;
}
-
- if (Q->ne[1] <= 32 || Q->ne[0] > 128) {
- constexpr int cols_per_block = 16;
- constexpr int nwarps = 4;
- switch (Q->ne[0]) {
- case 64:
- launch_fattn_f16< 64, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 80:
- launch_fattn_f16< 80, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 96:
- launch_fattn_f16< 96, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 112:
- launch_fattn_f16<112, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 128:
- launch_fattn_f16<128, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 256:
- launch_fattn_f16<256, cols_per_block, nwarps, float>(ctx, dst);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
- } else {
- constexpr int cols_per_block = 32;
- constexpr int nwarps = 4;
- switch (Q->ne[0]) {
- case 64:
- launch_fattn_f16< 64, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 80:
- launch_fattn_f16< 80, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 96:
- launch_fattn_f16< 96, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 112:
- launch_fattn_f16<112, cols_per_block, nwarps, float>(ctx, dst);
- break;
- case 128:
- launch_fattn_f16<128, cols_per_block, nwarps, float>(ctx, dst);
- break;
- // case 256:
- // launch_fattn_f16<256, cols_per_block, nwarps, float>(ctx, dst);
- // break;
- default:
- GGML_ASSERT(false);
- break;
- }
- }
- return;
}
- if (Q->ne[1] == 1 && Q->ne[0] % (2*WARP_SIZE) == 0) {
- ggml_cuda_flash_attn_ext_vec_f16(ctx, dst);
- return;
- }
-
- if (Q->ne[1] <= 8 && Q->ne[0] % WARP_SIZE == 0) {
- constexpr int cols_per_block = 8;
- constexpr int nwarps = 4;
- switch (Q->ne[0]) {
- case 64:
- launch_fattn_f16< 64, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 96:
- launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 128:
- launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 256:
- launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
- return;
- }
-
- if (Q->ne[1] <= 32) {
- constexpr int cols_per_block = 16;
- constexpr int nwarps = 4;
- switch (Q->ne[0]) {
- case 64:
- launch_fattn_f16< 64, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 80:
- launch_fattn_f16< 80, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 96:
- launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 112:
- launch_fattn_f16<112, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 128:
- launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 256:
- launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
- return;
- }
-
- constexpr int cols_per_block = 32;
- constexpr int nwarps = 4;
- switch (Q->ne[0]) {
- case 64:
- launch_fattn_f16< 64, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 80:
- launch_fattn_f16< 80, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 96:
- launch_fattn_f16< 96, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 112:
- launch_fattn_f16<112, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 128:
- launch_fattn_f16<128, cols_per_block, nwarps, half>(ctx, dst);
- break;
- case 256:
- launch_fattn_f16<256, cols_per_block, nwarps, half>(ctx, dst);
- break;
- default:
- GGML_ASSERT(false);
- break;
- }
- return;
+ ggml_cuda_flash_attn_ext_wmma_f16(ctx, dst);
}
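For reference, the FATTN_VEC_F16_CASE / FATTN_VEC_F32_CASE tables above are plain early-return dispatch: the first entry whose head size and K/V tensor types match forwards to the corresponding template instantiation, and falling through every entry reaches on_no_fattn_vec_case. A self-contained sketch of the same pattern with hypothetical stand-in types (not the real ggml tensors):

#include <cstdio>

enum simple_type { TYPE_F16, TYPE_Q4_0, TYPE_Q8_0 };

struct simple_tensor { int ne0; simple_type type; };

template <int D, simple_type type_K, simple_type type_V>
static void vec_case(const simple_tensor &, const simple_tensor &) {
    std::printf("dispatched: D=%d\n", D);
}

#define VEC_CASE(D, type_K, type_V)                                 \
    if (Q.ne0 == (D) && K.type == (type_K) && V.type == (type_V)) { \
        vec_case<D, type_K, type_V>(K, V);                          \
        return;                                                     \
    }

static void dispatch(const simple_tensor & Q, const simple_tensor & K, const simple_tensor & V) {
    VEC_CASE( 64, TYPE_F16,  TYPE_F16)
    VEC_CASE(128, TYPE_Q4_0, TYPE_Q4_0)
    VEC_CASE(128, TYPE_Q8_0, TYPE_Q8_0)
    std::printf("no kernel compiled for this head size / type combination\n");
}

int main() {
    const simple_tensor Q = {128, TYPE_F16 };
    const simple_tensor K = {128, TYPE_Q8_0};
    const simple_tensor V = {128, TYPE_Q8_0};
    dispatch(Q, K, V); // prints "dispatched: D=128"
    return 0;
}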
diff --git a/ggml-cuda/mmq.cu b/ggml-cuda/mmq.cu
index c0a66d9b6..ebe1dc5c8 100644
--- a/ggml-cuda/mmq.cu
+++ b/ggml-cuda/mmq.cu
@@ -386,7 +386,7 @@ static __device__ __forceinline__ float vec_dot_q5_0_q8_1_mul_mat(
u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
}
- return vec_dot_q8_0_q8_1_impl
+ return vec_dot_q8_0_q8_1_impl
(&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
}
@@ -547,7 +547,7 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1_mul_mat(
const float * x_dmf = (const float *) x_dm;
const float * y_df = (const float *) y_ds;
- return vec_dot_q8_0_q8_1_impl
+ return vec_dot_q8_0_q8_1_impl
(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
}
diff --git a/ggml-cuda/norm.cu b/ggml-cuda/norm.cu
index 86f774534..30866d512 100644
--- a/ggml-cuda/norm.cu
+++ b/ggml-cuda/norm.cu
@@ -170,6 +170,8 @@ void ggml_cuda_op_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_F32);
@@ -188,6 +190,8 @@ void ggml_cuda_op_group_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst)
float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_F32);
@@ -202,6 +206,8 @@ void ggml_cuda_op_rms_norm(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_F32);
diff --git a/ggml-cuda/rope.cu b/ggml-cuda/rope.cu
index 50f2cf415..0dd07977e 100644
--- a/ggml-cuda/rope.cu
+++ b/ggml-cuda/rope.cu
@@ -61,7 +61,7 @@ static __global__ void rope(
template<typename T, bool has_pos, bool has_freq_facs>
static __global__ void rope_neox(
const T * x, T * dst, int ncols, int n_dims, const int32_t * pos, float freq_scale, int p_delta_rows,
- float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, float inv_ndims, const float * freq_factors
+ float ext_factor, float attn_factor, rope_corr_dims corr_dims, float theta_scale, const float * freq_factors
) {
const int col = 2*(blockDim.y*blockIdx.y + threadIdx.y);
@@ -85,15 +85,13 @@ static __global__ void rope_neox(
const int i = row*ncols + ib*n_dims + ic/2;
const int i2 = row/p_delta_rows;
- float cur_rot = inv_ndims * ic - ib;
-
const int p = has_pos ? pos[i2] : 0;
const float freq_factor = has_freq_facs ? freq_factors[ic/2] : 1.0f;
- const float theta_base = p*freq_scale*powf(theta_scale, col/2.0f)/freq_factor;
+ const float theta_base = p*powf(theta_scale, col/2.0f)/freq_factor;
float cos_theta, sin_theta;
- rope_yarn(theta_base, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ rope_yarn(theta_base, freq_scale, corr_dims, ic, ext_factor, attn_factor, &cos_theta, &sin_theta);
const float x0 = x[i + 0];
const float x1 = x[i + n_dims/2];
@@ -174,30 +172,29 @@ static void rope_neox_cuda(
const dim3 block_nums(nrows, num_blocks_x, 1);
const float theta_scale = powf(freq_base, -2.0f/n_dims);
- const float inv_ndims = -1.0f / n_dims;
if (pos == nullptr) {
if (freq_factors == nullptr) {
rope_neox<T, false, false><<<block_nums, block_dims, 0, stream>>>(
x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, inv_ndims, freq_factors
+ theta_scale, freq_factors
);
} else {
rope_neox<T, false, true><<<block_nums, block_dims, 0, stream>>>(
x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, inv_ndims, freq_factors
+ theta_scale, freq_factors
);
}
} else {
if (freq_factors == nullptr) {
rope_neox<T, true, false><<<block_nums, block_dims, 0, stream>>>(
x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, inv_ndims, freq_factors
+ theta_scale, freq_factors
);
} else {
rope_neox<T, true, true><<<block_nums, block_dims, 0, stream>>>(
x, dst, ncols, n_dims, pos, freq_scale, p_delta_rows, ext_factor, attn_factor, corr_dims,
- theta_scale, inv_ndims, freq_factors
+ theta_scale, freq_factors
);
}
}
@@ -254,6 +251,7 @@ void ggml_cuda_op_rope(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
float * dst_d = (float *)dst->data;
cudaStream_t stream = ctx.stream();
+ GGML_ASSERT(ggml_is_contiguous(src0));
GGML_ASSERT(src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16);
GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
GGML_ASSERT(src0->type == dst->type);
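The rope.cu change above drops the inv_ndims/cur_rot intermediate: theta_base no longer has freq_scale folded in, and rope_yarn receives the raw rotation index ic together with freq_scale and applies the interpolation itself. A host-side sketch of the new formulation (rope_yarn is stubbed to the ext_factor == 0 case; names and values are illustrative only):

#include <cmath>
#include <cstdio>

// Stub of rope_yarn(): with ext_factor == 0 the interpolated angle is simply
// freq_scale * theta_extrap; the real function also blends in extrapolation
// and an attention scaling factor based on the rotation index.
static void rope_yarn_stub(float theta_extrap, float freq_scale,
                           float /*i0: rotation index, used by the real ramp*/,
                           float * cos_theta, float * sin_theta) {
    const float theta = freq_scale*theta_extrap;
    *cos_theta = std::cos(theta);
    *sin_theta = std::sin(theta);
}

int main() {
    const int   n_dims      = 128;
    const int   pos         = 42;
    const float freq_base   = 10000.0f;
    const float freq_scale  = 0.5f;
    const float theta_scale = std::pow(freq_base, -2.0f/n_dims);

    for (int ic = 0; ic < n_dims; ic += 2) {
        // New formulation: freq_scale is not pre-multiplied into theta_base,
        // and the rotation index passed along is just ic.
        const float theta_base = pos*std::pow(theta_scale, ic/2.0f);
        float cos_theta, sin_theta;
        rope_yarn_stub(theta_base, freq_scale, (float) ic, &cos_theta, &sin_theta);
        if (ic == 0) {
            std::printf("ic=%d cos=%.3f sin=%.3f\n", ic, cos_theta, sin_theta);
        }
    }
    return 0;
}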
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu
new file mode 100644
index 000000000..d7f103475
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu
new file mode 100644
index 000000000..f3d8d2eda
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu
new file mode 100644
index 000000000..9beb05ca2
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu
new file mode 100644
index 000000000..0c163dcba
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu
new file mode 100644
index 000000000..3980167b3
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu
new file mode 100644
index 000000000..fe099921d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-f16-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu
new file mode 100644
index 000000000..d4d5e7999
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu
new file mode 100644
index 000000000..f08b10c4d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu
new file mode 100644
index 000000000..e8c3f8adc
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu
new file mode 100644
index 000000000..c01416a13
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu
new file mode 100644
index 000000000..46615f281
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
new file mode 100644
index 000000000..72dcc1a2f
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
new file mode 100644
index 000000000..9fa8a377d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
new file mode 100644
index 000000000..20ea86c6d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
new file mode 100644
index 000000000..ed815957c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
new file mode 100644
index 000000000..bbe9e6a1c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
new file mode 100644
index 000000000..d12a61699
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
new file mode 100644
index 000000000..1e901afcb
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q4_1-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
new file mode 100644
index 000000000..a3f98ce37
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
new file mode 100644
index 000000000..1bae97243
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
new file mode 100644
index 000000000..7258e9775
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
new file mode 100644
index 000000000..08435c005
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
new file mode 100644
index 000000000..17864e8e9
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
new file mode 100644
index 000000000..9239138c9
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
new file mode 100644
index 000000000..e387d9c1d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
new file mode 100644
index 000000000..d69d3bbd6
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
new file mode 100644
index 000000000..61a478816
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
new file mode 100644
index 000000000..89995080a
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
new file mode 100644
index 000000000..9e6a58dff
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
new file mode 100644
index 000000000..153cbfd86
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q5_1-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
new file mode 100644
index 000000000..09d576558
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
new file mode 100644
index 000000000..3e3c91e68
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
new file mode 100644
index 000000000..7b973058f
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
new file mode 100644
index 000000000..a43a475d4
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
new file mode 100644
index 000000000..5b570c0a3
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
new file mode 100644
index 000000000..bf2cc684e
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs128-q8_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
new file mode 100644
index 000000000..7428e45ea
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs256-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
new file mode 100644
index 000000000..4aee830de
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
new file mode 100644
index 000000000..36acb6319
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
new file mode 100644
index 000000000..a4090c390
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
new file mode 100644
index 000000000..17b6b2d11
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
new file mode 100644
index 000000000..549e1cea1
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
new file mode 100644
index 000000000..66bcd820f
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f16-instance-hs64-f16-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f16.cuh"
+
+DECL_FATTN_VEC_F16_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
new file mode 100644
index 000000000..15933a299
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
new file mode 100644
index 000000000..8aa785583
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
new file mode 100644
index 000000000..bde3924fd
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
new file mode 100644
index 000000000..1708181c1
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate-variants.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
new file mode 100644
index 000000000..30fa6fa4c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
new file mode 100644
index 000000000..69673d50f
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-f16-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
new file mode 100644
index 000000000..d8b2b2e18
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
new file mode 100644
index 000000000..01cce7ab5
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu
new file mode 100644
index 000000000..fd5563b39
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu
new file mode 100644
index 000000000..b13cc4a0c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu
new file mode 100644
index 000000000..86f1fc637
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu
new file mode 100644
index 000000000..26e7df4be
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu
new file mode 100644
index 000000000..e4fda8952
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu
new file mode 100644
index 000000000..bd15117b4
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu
new file mode 100644
index 000000000..cb6c6a760
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu
new file mode 100644
index 000000000..201b6641d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu
new file mode 100644
index 000000000..6da57a44a
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu
new file mode 100644
index 000000000..47623c9bf
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q4_1-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q4_1, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu
new file mode 100644
index 000000000..82c6861d2
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu
new file mode 100644
index 000000000..24a80c2b0
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu
new file mode 100644
index 000000000..b95eaf7e1
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu
new file mode 100644
index 000000000..275f2efcc
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu
new file mode 100644
index 000000000..3673f7fd5
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu
new file mode 100644
index 000000000..2c4d59947
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu
new file mode 100644
index 000000000..2457cdf3f
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu
new file mode 100644
index 000000000..b3b411ed3
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu
new file mode 100644
index 000000000..b7f308a4d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu
new file mode 100644
index 000000000..739686697
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu
new file mode 100644
index 000000000..708d03113
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu
new file mode 100644
index 000000000..df891be60
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q5_1-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q5_1, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu
new file mode 100644
index 000000000..f49b6d1f9
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu
new file mode 100644
index 000000000..1de92148b
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu
new file mode 100644
index 000000000..7a1ba7f8d
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
new file mode 100644
index 000000000..25493e4ba
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
new file mode 100644
index 000000000..3cd650c7b
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
new file mode 100644
index 000000000..88ffa43d6
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs128-q8_0-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(128, GGML_TYPE_Q8_0, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
new file mode 100644
index 000000000..8c7bac6c2
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs256-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(256, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
new file mode 100644
index 000000000..a28f62e7b
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-f16.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_F16);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
new file mode 100644
index 000000000..d39838b96
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
new file mode 100644
index 000000000..834d40f6c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q4_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q4_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
new file mode 100644
index 000000000..f7d54668b
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_0);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
new file mode 100644
index 000000000..59e00ad83
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q5_1.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q5_1);
diff --git a/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
new file mode 100644
index 000000000..6e63893de
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-vec-f32-instance-hs64-f16-q8_0.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f32.cuh"
+
+DECL_FATTN_VEC_F32_CASE(64, GGML_TYPE_F16, GGML_TYPE_Q8_0);
diff --git a/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
new file mode 100644
index 000000000..ca356ad6c
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb16.cu
@@ -0,0 +1,10 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+DECL_FATTN_WMMA_F16_CASE(64, 16, float);
+DECL_FATTN_WMMA_F16_CASE(80, 16, float);
+DECL_FATTN_WMMA_F16_CASE(96, 16, float);
+DECL_FATTN_WMMA_F16_CASE(112, 16, float);
+DECL_FATTN_WMMA_F16_CASE(128, 16, float);
+DECL_FATTN_WMMA_F16_CASE(256, 16, float);
diff --git a/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
new file mode 100644
index 000000000..430ee64eb
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqfloat-cpb32.cu
@@ -0,0 +1,9 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+DECL_FATTN_WMMA_F16_CASE(64, 32, float);
+DECL_FATTN_WMMA_F16_CASE(80, 32, float);
+DECL_FATTN_WMMA_F16_CASE(96, 32, float);
+DECL_FATTN_WMMA_F16_CASE(112, 32, float);
+DECL_FATTN_WMMA_F16_CASE(128, 32, float);
diff --git a/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
new file mode 100644
index 000000000..d421d17cc
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb16.cu
@@ -0,0 +1,10 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+DECL_FATTN_WMMA_F16_CASE(64, 16, half);
+DECL_FATTN_WMMA_F16_CASE(80, 16, half);
+DECL_FATTN_WMMA_F16_CASE(96, 16, half);
+DECL_FATTN_WMMA_F16_CASE(112, 16, half);
+DECL_FATTN_WMMA_F16_CASE(128, 16, half);
+DECL_FATTN_WMMA_F16_CASE(256, 16, half);
diff --git a/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
new file mode 100644
index 000000000..deacd5f58
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb32.cu
@@ -0,0 +1,10 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+DECL_FATTN_WMMA_F16_CASE(64, 32, half);
+DECL_FATTN_WMMA_F16_CASE(80, 32, half);
+DECL_FATTN_WMMA_F16_CASE(96, 32, half);
+DECL_FATTN_WMMA_F16_CASE(112, 32, half);
+DECL_FATTN_WMMA_F16_CASE(128, 32, half);
+DECL_FATTN_WMMA_F16_CASE(256, 32, half);
diff --git a/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
new file mode 100644
index 000000000..282896733
--- /dev/null
+++ b/ggml-cuda/template-instances/fattn-wmma-f16-instance-kqhalf-cpb8.cu
@@ -0,0 +1,8 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+DECL_FATTN_WMMA_F16_CASE(64, 8, half);
+DECL_FATTN_WMMA_F16_CASE(96, 8, half);
+DECL_FATTN_WMMA_F16_CASE(128, 8, half);
+DECL_FATTN_WMMA_F16_CASE(256, 8, half);
diff --git a/ggml-cuda/template-instances/generate_cu_files.py b/ggml-cuda/template-instances/generate_cu_files.py
new file mode 100755
index 000000000..ee5b460e0
--- /dev/null
+++ b/ggml-cuda/template-instances/generate_cu_files.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+
+from glob import glob
+import os
+
+TYPES_KV = ["GGML_TYPE_Q4_0", "GGML_TYPE_Q4_1", "GGML_TYPE_Q5_0", "GGML_TYPE_Q5_1", "GGML_TYPE_Q8_0", "GGML_TYPE_F16"]
+
+SOURCE_FATTN_VEC = """// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-vec-f{vkq_size}.cuh"
+
+DECL_FATTN_VEC_F{vkq_size}_CASE({head_size}, {type_k}, {type_v});
+"""
+
+SOURCE_FATTN_WMMA_START = """// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../fattn-wmma-f16.cuh"
+
+"""
+
+SOURCE_FATTN_WMMA_CASE = "DECL_FATTN_WMMA_F16_CASE({head_size}, {cols_per_block}, {kq_acc_t});\n"
+
+
+def get_short_name(long_quant_name):
+ return long_quant_name.replace("GGML_TYPE_", "").lower()
+
+
+def get_head_sizes(type_k, type_v):
+ if type_k == "GGML_TYPE_F16" and type_v == "GGML_TYPE_F16":
+ return [64, 128, 256]
+ if type_k == "GGML_TYPE_F16":
+ return [64, 128]
+ return [128]
+
+
+for filename in glob("*.cu"):
+ os.remove(filename)
+
+for vkq_size in [16, 32]:
+ for type_k in TYPES_KV:
+ for type_v in TYPES_KV:
+ for head_size in get_head_sizes(type_k, type_v):
+ with open(f"fattn-vec-f{vkq_size}-instance-hs{head_size}-{get_short_name(type_k)}-{get_short_name(type_v)}.cu", "w") as f:
+ f.write(SOURCE_FATTN_VEC.format(vkq_size=vkq_size, head_size=head_size, type_k=type_k, type_v=type_v))
+
+for kq_acc_t in ["half", "float"]:
+ for cols_per_block in [8, 16, 32]:
+ if kq_acc_t == "float" and cols_per_block == 8:
+ continue
+
+ with open(f"fattn-wmma-f16-instance-kq{kq_acc_t}-cpb{cols_per_block}.cu", "w") as f:
+ f.write(SOURCE_FATTN_WMMA_START)
+
+ for head_size in [64, 80, 96, 112, 128, 256]:
+ if cols_per_block == 8 and head_size % 32 != 0: # wmma fragment is 8x32
+ continue
+ if kq_acc_t == "float" and cols_per_block == 32 and head_size == 256: # register spilling, bad performance
+ continue
+ f.write(SOURCE_FATTN_WMMA_CASE.format(kq_acc_t=kq_acc_t, cols_per_block=cols_per_block, head_size=head_size))
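
Each of the generated .cu files above contains exactly one DECL_FATTN_* line, so every (head size, K type, V type) combination of the FlashAttention kernels becomes its own translation unit and the large kernel family can be compiled in parallel. The C++ sketch below illustrates the underlying "one explicit template instantiation per file" pattern; the fattn_case template and DECL_FATTN_CASE macro are invented stand-ins, while the real DECL_FATTN_* macros come from the fattn-*.cuh headers included by the generated files.

    // sketch.cpp -- minimal illustration, not the real llama.cpp code
    #include <cstdio>

    // What would live in the shared header: the kernel template ...
    template <int head_size, typename T_K, typename T_V>
    void fattn_case(const T_K * K, const T_V * V, float * out, int n) {
        // placeholder body standing in for the real FlashAttention kernel launch
        for (int i = 0; i < n; ++i) out[i] = float(K[i]) * float(V[i]);
    }

    // ... and a DECL_* style macro expanding to an explicit instantiation.
    #define DECL_FATTN_CASE(head_size, T_K, T_V) \
        template void fattn_case<head_size, T_K, T_V>(const T_K *, const T_V *, float *, int)

    // Each generated file is just the #include plus one line like this:
    DECL_FATTN_CASE(128, float, float);

    int main() {
        const float k[2] = {1.0f, 2.0f}, v[2] = {3.0f, 4.0f};
        float out[2];
        fattn_case<128, float, float>(k, v, out, 2);
        std::printf("%g %g\n", out[0], out[1]);
    }
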
diff --git a/ggml-cuda/vecdotq.cuh b/ggml-cuda/vecdotq.cuh
index 5ebdddcc7..df9752390 100644
--- a/ggml-cuda/vecdotq.cuh
+++ b/ggml-cuda/vecdotq.cuh
@@ -180,8 +180,8 @@ template static __device__ __forceinline__ float vec_dot_q5_1_q8_1_imp
#define VDR_Q8_0_Q8_1_MMVQ 2
#define VDR_Q8_0_Q8_1_MMQ 8
-template <int vdr> static __device__ __forceinline__ float vec_dot_q8_0_q8_1_impl(
- const int * v, const int * u, const float & d8_0, const float & d8_1) {
+template <typename T, int vdr> static __device__ __forceinline__ T vec_dot_q8_0_q8_1_impl(
+ const int * v, const int * u, const T & d8_0, const T & d8_1) {
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
int sumi = 0;
@@ -192,7 +192,7 @@ template static __device__ __forceinline__ float vec_dot_q8_0_q8_1_imp
sumi = __dp4a(v[i], u[i], sumi);
}
- return d8_0*d8_1 * sumi;
+ return d8_0*d8_1 * ((T) sumi);
#else
NO_DEVICE_CODE;
#endif // __CUDA_ARCH__ >= MIN_CC_DP4A
@@ -656,7 +656,7 @@ static __device__ __forceinline__ float vec_dot_q8_0_q8_1(
u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
}
- return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds));
+ return vec_dot_q8_0_q8_1_impl<float, VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d, __low2half(bq8_1->ds));
}
static __device__ __forceinline__ float vec_dot_q2_K_q8_1(
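
The change above turns the scale/result type of vec_dot_q8_0_q8_1_impl into a template parameter T, so the same routine can be instantiated for float (the existing mul-mat callers) or for half, e.g. for the f16 FlashAttention vector kernels operating on a quantized KV cache. A host-side C++ sketch of the idea, with the __dp4a intrinsic replaced by a plain int8 dot product and the names chosen for illustration only:

    #include <cstdint>
    #include <cstdio>

    template <typename T, int vdr>
    static T vec_dot_q8_0_q8_1_sketch(const int8_t * v, const int8_t * u, const T & d8_0, const T & d8_1) {
        int sumi = 0;
        for (int i = 0; i < vdr * 4; ++i) {    // vdr packed int32 values = vdr*4 int8 products
            sumi += int(v[i]) * int(u[i]);
        }
        return d8_0 * d8_1 * T(sumi);          // the cast mirrors the "((T) sumi)" change above
    }

    int main() {
        const int8_t v[8] = {1, 2, 3, 4, 5, 6, 7, 8};
        const int8_t u[8] = {8, 7, 6, 5, 4, 3, 2, 1};
        std::printf("%f\n", double(vec_dot_q8_0_q8_1_sketch<float, 2>(v, u, 0.5f, 0.25f)));
    }
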
diff --git a/ggml-kompute.cpp b/ggml-kompute.cpp
index 6c6058b2a..0c51c322f 100644
--- a/ggml-kompute.cpp
+++ b/ggml-kompute.cpp
@@ -1597,7 +1597,6 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
{
GGML_ASSERT(ne00 == ne10);
- // TODO: assert that dim2 and dim3 are contiguous
GGML_ASSERT(ne12 % ne02 == 0);
GGML_ASSERT(ne13 % ne03 == 0);
diff --git a/ggml-metal.m b/ggml-metal.m
index 4ba498e87..fddc44f78 100644
--- a/ggml-metal.m
+++ b/ggml-metal.m
@@ -779,6 +779,12 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
case GGML_OP_LEAKY_RELU:
return true;
case GGML_OP_FLASH_ATTN_EXT:
+ if (op->src[1]->type != GGML_TYPE_F16) {
+ return false;
+ }
+ if (op->src[2]->type != GGML_TYPE_F16) {
+ return false;
+ }
if (op->src[0]->ne[0] == 256) {
return false;
}
@@ -1519,7 +1525,6 @@ static enum ggml_status ggml_metal_graph_compute(
{
GGML_ASSERT(ne00 == ne10);
- // TODO: assert that dim2 and dim3 are contiguous
GGML_ASSERT(ne12 % ne02 == 0);
GGML_ASSERT(ne13 % ne03 == 0);
@@ -2187,6 +2192,7 @@ static enum ggml_status ggml_metal_graph_compute(
case GGML_OP_RMS_NORM:
{
GGML_ASSERT(ne00 % 4 == 0);
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
@@ -2214,6 +2220,7 @@ static enum ggml_status ggml_metal_graph_compute(
case GGML_OP_GROUP_NORM:
{
GGML_ASSERT(ne00 % 4 == 0);
+ GGML_ASSERT(ggml_is_contiguous(src0));
//float eps;
//memcpy(&eps, dst->op_params, sizeof(float));
@@ -2247,6 +2254,8 @@ static enum ggml_status ggml_metal_graph_compute(
} break;
case GGML_OP_NORM:
{
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
diff --git a/ggml-metal.metal b/ggml-metal.metal
index b16f2b7e0..0cb85e1a5 100644
--- a/ggml-metal.metal
+++ b/ggml-metal.metal
@@ -1767,13 +1767,13 @@ kernel void kernel_rope(
const int64_t p = pos[i2];
- const float theta_0 = (float)p;
+ const float theta_base = (float)p;
const float inv_ndims = -1.f/n_dims;
if (!is_neox) {
for (int64_t i0 = 2*tiitg; i0 < ne0; i0 += 2*tptg.x) {
+ const float theta = theta_base * pow(freq_base, inv_ndims*i0);
- const float theta = theta_0 * pow(freq_base, inv_ndims*i0);
float cos_theta, sin_theta;
rope_yarn(theta, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
@@ -1789,18 +1789,14 @@ kernel void kernel_rope(
} else {
for (int64_t ic = 2*tiitg; ic < ne0; ic += 2*tptg.x) {
if (ic < n_dims) {
- const int64_t ib = 0;
+ const int64_t i0 = ic/2;
- // simplified from `(ib * n_dims + ic) * inv_ndims`
- const float cur_rot = inv_ndims*ic - ib;
- const float freq_factor = src2 != src0 ? src2[ic/2] : 1.0f;
+ const float freq_factor = src2 != src0 ? src2[i0] : 1.0f;
- const float theta = theta_0 * pow(freq_base, cur_rot) / freq_factor;
+ const float theta = theta_base * pow(freq_base, inv_ndims*ic);
float cos_theta, sin_theta;
- rope_yarn(theta, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor, &cos_theta, &sin_theta);
-
- const int64_t i0 = ib*n_dims + ic/2;
+ rope_yarn(theta/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor, &cos_theta, &sin_theta);
device const T * const src = (device T *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
device T * dst_data = (device T *)((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
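
The NeoX branch of kernel_rope is simplified here: the ib offset and the cur_rot helper are gone, the rotated pair for column ic is addressed directly via i0 = ic/2, the angle is theta_base * freq_base^(-ic/n_dims), and the optional frequency factor is folded into the theta passed to rope_yarn. A scalar C++ sketch of the resulting index/angle math (assuming the usual NeoX pairing of element i0 with i0 + n_dims/2, as in the CPU path, and plain RoPE in place of the full rope_yarn() correction):

    #include <cmath>
    #include <cstdio>
    #include <vector>

    void rope_neox_row(std::vector<float> & x, int n_dims, int pos, float freq_base,
                       const float * freq_factors /* may be null */) {
        const float theta_base = float(pos);
        const float inv_ndims  = -1.0f / n_dims;
        for (int ic = 0; ic < n_dims; ic += 2) {
            const int   i0          = ic / 2;                      // new index: ic/2, no ib term
            const float freq_factor = freq_factors ? freq_factors[i0] : 1.0f;
            const float theta       = theta_base * std::pow(freq_base, inv_ndims * ic) / freq_factor;
            const float c = std::cos(theta), s = std::sin(theta);
            const float x0 = x[i0], x1 = x[i0 + n_dims / 2];       // NeoX pairs (i0, i0 + n_dims/2)
            x[i0]              = x0 * c - x1 * s;
            x[i0 + n_dims / 2] = x0 * s + x1 * c;
        }
    }

    int main() {
        std::vector<float> row(8, 1.0f);
        rope_neox_row(row, 8, 3, 10000.0f, nullptr);
        for (float v : row) std::printf("%f ", v);
        std::printf("\n");
    }
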
diff --git a/ggml-quants.c b/ggml-quants.c
index 4f2c7224c..9f864e5c4 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -6088,6 +6088,7 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r
const uint8_t * restrict q2 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
+
const __m128i mins_and_scales = __lsx_vld((const __m128i*)x[i].scales, 0);
const __m128i scales8 = __lsx_vand_v(mins_and_scales, m4);
const __m128i mins8 = __lsx_vand_v(__lsx_vsrli_h(mins_and_scales, 4), m4);
@@ -6807,6 +6808,8 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
for (int i = 0; i < nb; ++i) {
const float d = y[i].d * GGML_FP16_TO_FP32(x[i].d);
+ const uint8_t * restrict q3 = x[i].qs;
+ const int8_t * restrict q8 = y[i].qs;
// Set up scales
memcpy(aux, x[i].scales, 12);
__m128i scales128 = lsx_set_w(
@@ -6828,29 +6831,32 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r
int bit = 0;
int is = 0;
+ __m256i xvbit;
- const uint8_t * restrict q3 = x[i].qs;
- const int8_t * restrict q8 = y[i].qs;
for (int j = 0; j < QK_K/128; ++j) {
// load low 2 bits
const __m256i q3bits = __lasx_xvld((const __m256i*)q3, 0); q3 += 32;
+ xvbit = __lasx_xvreplgr2vr_h(bit);
// prepare low and high bits
const __m256i q3l_0 = __lasx_xvand_v(q3bits, m3);
- const __m256i q3h_0 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvandn_v(hbits, __lasx_xvslli_h(mone, bit)), bit), 2);
+ const __m256i q3h_0 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvandn_v(hbits, __lasx_xvsll_h(mone, xvbit)), xvbit), 2);
++bit;
+ xvbit = __lasx_xvreplgr2vr_h(bit);
const __m256i q3l_1 = __lasx_xvand_v(__lasx_xvsrli_h(q3bits, 2), m3);
- const __m256i q3h_1 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvandn_v(hbits, __lasx_xvslli_h(mone, bit)), bit), 2);
+ const __m256i q3h_1 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvandn_v(hbits, __lasx_xvsll_h(mone, xvbit)), xvbit), 2);
++bit;
+ xvbit = __lasx_xvreplgr2vr_h(bit);
const __m256i q3l_2 = __lasx_xvand_v(__lasx_xvsrli_h(q3bits, 4), m3);
- const __m256i q3h_2 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvandn_v(hbits, __lasx_xvslli_h(mone, bit)), bit), 2);
+ const __m256i q3h_2 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvandn_v(hbits, __lasx_xvsll_h(mone, xvbit)), xvbit), 2);
++bit;
+ xvbit = __lasx_xvreplgr2vr_h(bit);
const __m256i q3l_3 = __lasx_xvand_v(__lasx_xvsrli_h(q3bits, 6), m3);
- const __m256i q3h_3 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvandn_v(hbits, __lasx_xvslli_h(mone, bit)), bit), 2);
+ const __m256i q3h_3 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvandn_v(hbits, __lasx_xvsll_h(mone, xvbit)), xvbit), 2);
++bit;
// load Q8 quants
@@ -7399,6 +7405,9 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
*s = vec_extract(vsumf0, 0);
#elif defined __loongarch_asx
+ GGML_UNUSED(kmask1);
+ GGML_UNUSED(kmask2);
+ GGML_UNUSED(kmask3);
const __m256i m4 = __lasx_xvreplgr2vr_b(0xF);
@@ -7411,6 +7420,11 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
const uint8_t * restrict q4 = x[i].qs;
const int8_t * restrict q8 = y[i].qs;
@@ -7450,16 +7464,17 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r
__m256 vd = __lasx_xvreplfr2vr_s(d);
acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
+
}
acc_m = __lsx_vfadd_s(acc_m, (__m128)__lsx_vpermi_w((__m128i)acc_m, (__m128i)acc_m, 0xee));
__m128i tmp1 = __lsx_vinsgr2vr_w(__lsx_vldi(0), __lsx_vpickve2gr_w((__m128i)acc_m, 1), 0);
acc_m = __lsx_vfadd_s(acc_m, (__m128)tmp1);
+
ft_union fi;
fi.i = __lsx_vpickve2gr_w(acc_m, 0);
*s = hsum_float_8(acc) + fi.f ;
-
#else
const uint8_t * scales = (const uint8_t*)&utmp[0];
@@ -7997,6 +8012,9 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
*s = vec_extract(vsumf0, 0);
#elif defined __loongarch_asx
+ GGML_UNUSED(kmask1);
+ GGML_UNUSED(kmask2);
+ GGML_UNUSED(kmask3);
const __m256i m4 = __lasx_xvreplgr2vr_b(0xF);
const __m128i mzero = __lsx_vldi(0);
@@ -8015,6 +8033,11 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
const float dmin = -y[i].d * GGML_FP16_TO_FP32(x[i].dmin);
memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
const __m256i mins_and_scales = lasx_extu8_16(lsx_set_w(utmp[3], utmp[2], utmp[1], utmp[0]));
@@ -8033,6 +8056,7 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
__m256i sumi = __lasx_xvldi(0);
int bit = 0;
+ __m256i xvbit;
for (int j = 0; j < QK_K/64; ++j) {
@@ -8041,13 +8065,15 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
const __m256i q5bits = __lasx_xvld((const __m256i*)q5, 0); q5 += 32;
+ xvbit = __lasx_xvreplgr2vr_h(bit++);
const __m256i q5l_0 = __lasx_xvand_v(q5bits, m4);
- const __m256i q5h_0 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvand_v(hbits, hmask), bit++), 4);
+ const __m256i q5h_0 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvand_v(hbits, hmask), xvbit), 4);
const __m256i q5_0 = __lasx_xvadd_b(q5l_0, q5h_0);
hmask = __lasx_xvslli_h(hmask, 1);
+ xvbit = __lasx_xvreplgr2vr_h(bit++);
const __m256i q5l_1 = __lasx_xvand_v(__lasx_xvsrli_h(q5bits, 4), m4);
- const __m256i q5h_1 = __lasx_xvslli_h(__lasx_xvsrli_h(__lasx_xvand_v(hbits, hmask), bit++), 4);
+ const __m256i q5h_1 = __lasx_xvslli_h(__lasx_xvsrl_h(__lasx_xvand_v(hbits, hmask), xvbit), 4);
const __m256i q5_1 = __lasx_xvadd_b(q5l_1, q5h_1);
hmask = __lasx_xvslli_h(hmask, 1);
@@ -8061,10 +8087,12 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r
p16_1 = lasx_madd_h(scale_1, p16_1);
sumi = __lasx_xvadd_w(sumi, __lasx_xvadd_w(p16_0, p16_1));
+
}
__m256 vd = __lasx_xvreplfr2vr_s(d);
acc = __lasx_xvfmadd_s(vd, __lasx_xvffint_s_w(sumi), acc);
+
}
*s = hsum_float_8(acc) + summs;
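
Two kinds of LoongArch fixes appear in the hunks above. First, the LASX immediate-shift intrinsics (__lasx_xvslli_h / __lasx_xvsrli_h) expect a constant shift count, but the q3_K and q5_K loops shift by a running `bit`, so the value is broadcast into a vector (`xvbit`) and the register-shift forms (__lasx_xvsll_h / __lasx_xvsrl_h) are used instead. Second, the q4_K and q5_K paths gain the `utmp` scale repacking with the kmask constants that the other SIMD backends already perform. The scalar C++ model below shows one 16-bit lane of the q3_K high-bit extraction; it assumes, as in the AVX2 reference path, that `mone` holds 1 in every lane and that the and-not computes ~hbits & mask:

    #include <cstdint>
    #include <cstdio>

    // ((~hbits & (1 << bit)) >> bit) << 2 : yields 4 where the stored high bit is clear,
    // ready to be subtracted from the low 2-bit quant.
    static uint16_t q3_high_bits_lane(uint16_t hbits, int bit) {
        const uint16_t mone = 1;
        return uint16_t(((uint16_t(~hbits) & uint16_t(mone << bit)) >> bit) << 2);
    }

    int main() {
        std::printf("%u %u\n", unsigned(q3_high_bits_lane(0b0101u, 0)), unsigned(q3_high_bits_lane(0b0101u, 1)));
    }
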
diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp
index 022a52aeb..5cd97e4ff 100644
--- a/ggml-sycl.cpp
+++ b/ggml-sycl.cpp
@@ -3022,20 +3022,19 @@ static int g_work_group_size = 0;
// typedef sycl::half ggml_fp16_t;
#define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP
-#define VER_4VEC 610 //todo for hardward optimize.
+#define VER_4VEC 130 //todo for hardward optimize.
#define VER_GEN9 700 //todo for hardward optimize.
#define VER_GEN12 1000000 //todo for hardward optimize.
#define VER_GEN13 (VER_GEN12 + 1030) //todo for hardward optimize.
#define GGML_SYCL_MAX_NODES 8192 //TODO: adapt to hardwares
-
-//define for XMX in Intel GPU
-//TODO: currently, it's not used for XMX really.
-#define SYCL_USE_XMX
+#if !defined(GGML_SYCL_FORCE_MMQ)
+ #define SYCL_USE_XMX
+#endif
// max batch size to use MMQ kernels when tensor cores are available
-#define XMX_MAX_BATCH_SIZE 32
+#define MMQ_MAX_BATCH_SIZE 32
#if defined(_MSC_VER)
@@ -13567,7 +13566,7 @@ inline void ggml_sycl_op_concat(const ggml_tensor *src0,
#pragma message("TODO: generalize concat kernel for dim != 2")
#pragma message(" https://github.com/ggerganov/llama.cpp/pull/7563")
int dim = dst->op_params[0];
- GGML_ASSERT(dim != 2);
+ GGML_ASSERT(dim == 2);
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
@@ -15184,7 +15183,7 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0,
const int64_t r2 = ne12/ne02;
const int64_t r3 = ne13/ne03;
- if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) {
+ if (r2 == 1 && r3 == 1 && ggml_is_contiguous_2(src0) && ggml_is_contiguous_2(src1)) {
// there is no broadcast and src0, src1 are contiguous across dims 2, 3
SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(
*g_sycl_handles[g_main_device], oneapi::mkl::transpose::trans,
@@ -15249,6 +15248,29 @@ catch (sycl::exception const &exc) {
std::exit(1);
}
+inline bool ggml_sycl_supports_mmq(enum ggml_type type) {
+ // TODO: accuracy issues in MMQ
+ return false;
+}
+
+bool ggml_sycl_supports_dmmv(enum ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_F16:
+ return true;
+ default:
+ return false;
+ }
+}
static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
const bool all_on_device =
@@ -15265,76 +15287,42 @@ static void ggml_sycl_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
}
}
+ // check data types and tensor shapes for custom matrix multiplication kernels:
+ bool use_dequantize_mul_mat_vec = ggml_sycl_supports_dmmv(src0->type)
+ && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
+ && src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;
+
+ bool use_mul_mat_vec_q = ggml_is_quantized(src0->type)
+ && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32
+ && src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
+
+ bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
+ && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
+
+ // mmvq and mmq need the __dp4a instruction which is available for gen12+
+ // Workaround in https://github.com/ggerganov/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
+ use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
#ifdef SYCL_USE_XMX
- const bool use_xmx = true;
-#else
- const bool use_xmx = false;
-#endif
+ use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
+#endif // SYCL_USE_XMX
- // debug helpers
- //printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
- //printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
- //printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]);
- //printf(" %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]);
- //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
- //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
-
- if (!split && all_on_device && !use_xmx && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+ if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
// KQ single-batch
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_vec_p021\n");
ggml_sycl_mul_mat_vec_p021(src0, src1, dst);
- } else if (!split && all_on_device && !use_xmx && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+ } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
// KQV single-batch
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_vec_nc\n");
ggml_sycl_mul_mat_vec_nc(src0, src1, dst);
- } else if (!split && all_on_device && use_xmx && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+ } else if (!split && src0->type == GGML_TYPE_F16 && (src1->type == GGML_TYPE_F16) && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2]*src1->ne[3] > 1) {
// KQ + KQV multi-batch
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat_batched_sycl\n");
ggml_sycl_mul_mat_batched_sycl(src0, src1, dst);
- } else if (src0->type == GGML_TYPE_F32) {
- // GGML_SYCL_DEBUG("ggml_sycl_op_mul_mat\n");
- ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
- } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
- // GGML_SYCL_DEBUG("ggml_is_quantized or GGML_TYPE_F16\n");
- if (src1->ne[1] == 1 && src0->ne[0] % GGML_SYCL_DMMV_X == 0) {
-#ifdef GGML_SYCL_FORCE_DMMV
- const bool use_mul_mat_vec_q = false;
-#else
- bool use_mul_mat_vec_q = min_compute_capability >= VER_4VEC && ggml_is_quantized(src0->type);
- use_mul_mat_vec_q = use_mul_mat_vec_q ||
- (src0->type == GGML_TYPE_IQ2_XXS) || (src0->type == GGML_TYPE_IQ2_XS) || (src0->type == GGML_TYPE_IQ2_S) ||
- (src0->type == GGML_TYPE_IQ3_XXS) || (src0->type == GGML_TYPE_IQ3_S) ||
- (src0->type == GGML_TYPE_IQ4_NL) || (src0->type == GGML_TYPE_IQ4_XS) ||
- (src0->type == GGML_TYPE_IQ1_S) || (src0->type == GGML_TYPE_IQ1_M);
-
-
-#endif // GGML_SYCL_FORCE_DMMV
-
- if (use_mul_mat_vec_q) {
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_vec_q path\n");
- ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, true);
- } else {
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_dequantize_mul_mat_vec path\n");
- ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, false);
- }
- } else {
- bool use_mul_mat_q = min_compute_capability >= VER_4VEC && ggml_is_quantized(src0->type);
- use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
-
- if (use_xmx && min_compute_capability >= VER_GEN9 && src1->ne[1] > XMX_MAX_BATCH_SIZE) {
- use_mul_mat_q = false;
- }
-
- if (use_mul_mat_q) {
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_q path\n");
- ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_q, true);
- } else {
- // GGML_SYCL_DEBUG("ggml_sycl_mul_mat ggml_sycl_op_mul_mat_sycl path\n");
- ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
- }
- }
+ } else if (use_dequantize_mul_mat_vec) {
+ ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec, false);
+ } else if (use_mul_mat_vec_q) {
+ ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_vec_q, true);
+ } else if (use_mul_mat_q) {
+ ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_q, true);
} else {
- GGML_ASSERT(false);
+ ggml_sycl_op_mul_mat(src0, src1, dst, ggml_sycl_op_mul_mat_sycl, false);
}
}
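
The rewritten ggml_sycl_mul_mat computes the three capability flags (dequantize-mul-mat-vec, mul-mat-vec-q, mul-mat-q) up front and then dispatches through a flat if/else chain with the generic SYCL GEMM as the final fallback, instead of the old nested compute-capability logic that could end in GGML_ASSERT(false). Below is a condensed, host-only C++ sketch of that priority order; the enum, struct and thresholds are simplified stand-ins for ggml's real types and constants:

    #include <cstdio>

    enum class Kind { F32, F16, Quantized };
    enum class Path { DequantizeMulMatVec, MulMatVecQ, MulMatQ, GenericSYCL };

    struct Tensor { Kind type; long ne1; };   // ne1 ~ ne[1], the number of src1 columns

    Path choose_mul_mat_path(const Tensor & src0, const Tensor & src1,
                             bool supports_dmmv, bool supports_mmq,
                             long mmvq_max_batch, long mmq_max_batch) {
        const bool use_dmmv = supports_dmmv && src1.ne1 == 1;                       // single column
        const bool use_mmvq = src0.type == Kind::Quantized && src1.ne1 <= mmvq_max_batch;
        const bool use_mmq  = supports_mmq && src1.ne1 <= mmq_max_batch;
        if (use_dmmv) return Path::DequantizeMulMatVec;   // checked first, as in the new code
        if (use_mmvq) return Path::MulMatVecQ;
        if (use_mmq)  return Path::MulMatQ;
        return Path::GenericSYCL;                         // fallback instead of asserting
    }

    int main() {
        Tensor a{Kind::Quantized, 0}, b{Kind::F32, 1};
        std::printf("%d\n", int(choose_mul_mat_path(a, b, true, false, 8, 32)));
    }
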
diff --git a/ggml.c b/ggml.c
index 7a3a5fa94..426501015 100644
--- a/ggml.c
+++ b/ggml.c
@@ -60,6 +60,9 @@
typedef volatile LONG atomic_int;
typedef atomic_int atomic_bool;
+typedef atomic_int atomic_flag;
+
+#define ATOMIC_FLAG_INIT 0
static void atomic_store(atomic_int * ptr, LONG val) {
InterlockedExchange(ptr, val);
@@ -73,6 +76,12 @@ static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
static LONG atomic_fetch_sub(atomic_int * ptr, LONG dec) {
return atomic_fetch_add(ptr, -(dec));
}
+static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) {
+ return InterlockedExchange(ptr, 1);
+}
+static void atomic_flag_clear(atomic_flag * ptr) {
+ InterlockedExchange(ptr, 0);
+}
typedef HANDLE pthread_t;
@@ -1567,11 +1576,11 @@ do { \
// F16 arithmetic is not supported by AVX, so we use F32 instead
-#define GGML_F32Cx8 __m256
+#define GGML_F32Cx8 __m256
#define GGML_F32Cx8_ZERO (__m256)__lasx_xvldi(0)
#define GGML_F32Cx8_SET1(x) (__m256)__lasx_xvreplgr2vr_w((x))
-static inline __m256 __lasx_f32cx8_load(ggml_fp16_t *x) {
+static inline __m256 __lasx_f32cx8_load(const ggml_fp16_t * x) {
float tmp[8];
for (int i = 0; i < 8; i++) {
@@ -1580,13 +1589,14 @@ static inline __m256 __lasx_f32cx8_load(ggml_fp16_t *x) {
return (__m256)__lasx_xvld(tmp, 0);
}
-static inline void __lasx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
+static inline void __lasx_f32cx8_store(ggml_fp16_t * x, __m256 y) {
float arr[8];
__lasx_xvst(y, arr, 0);
- for (int i = 0; i < 8; i++)
+ for (int i = 0; i < 8; i++) {
x[i] = GGML_FP32_TO_FP16(arr[i]);
+ }
}
#define GGML_F32Cx8_LOAD(x) __lasx_f32cx8_load(x)
#define GGML_F32Cx8_STORE(x, y) __lasx_f32cx8_store(x, y)
@@ -1662,7 +1672,7 @@ static inline void __lasx_f32cx8_store(ggml_fp16_t *x, __m256 y) {
#define GGML_F16_STEP 32
#define GGML_F16_EPR 4
-static inline __m128 __lsx_f16x4_load(ggml_fp16_t *x) {
+static inline __m128 __lsx_f16x4_load(const ggml_fp16_t * x) {
float tmp[4];
tmp[0] = GGML_FP16_TO_FP32(x[0]);
@@ -1673,7 +1683,7 @@ static inline __m128 __lsx_f16x4_load(ggml_fp16_t *x) {
return __lsx_vld(tmp, 0);
}
-static inline void __lsx_f16x4_store(ggml_fp16_t *x, __m128 y) {
+static inline void __lsx_f16x4_store(ggml_fp16_t * x, __m128 y) {
float arr[4];
__lsx_vst(y, arr, 0);
@@ -2306,32 +2316,27 @@ inline static __m512 ggml_v_expf(__m512 x) {
const __m512 r = _mm512_set1_ps(0x1.8p23f);
const __m512 z = _mm512_fmadd_ps(x, _mm512_set1_ps(0x1.715476p+0f), r);
const __m512 n = _mm512_sub_ps(z, r);
- const __m512 b = _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f),
- _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x));
- const __m512i e = _mm512_slli_epi32(_mm512_castps_si512(z), 23);
- const __m512 k = _mm512_castsi512_ps(_mm512_add_epi32(e, _mm512_castps_si512(_mm512_set1_ps(1))));
- const __mmask16 c = _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(126), _CMP_GT_OQ);
- const __m512 u = _mm512_mul_ps(b, b);
- const __m512 j = _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b,
- _mm512_set1_ps(0x1.573e2ep-5f)), u,
- _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b,
- _mm512_set1_ps(0x1.fffdb6p-2f))),
- u, _mm512_mul_ps(_mm512_set1_ps(0x1.ffffecp-1f), b));
- if (_mm512_kortestz(c, c))
- return _mm512_fmadd_ps(j, k, k);
- const __m512i g = _mm512_and_si512(
- _mm512_movm_epi32(_mm512_cmp_ps_mask(n, _mm512_setzero_ps(), _CMP_LE_OQ)),
- _mm512_set1_epi32(0x82000000u));
- const __m512 s1 =
- _mm512_castsi512_ps(_mm512_add_epi32(g, _mm512_set1_epi32(0x7f000000u)));
- const __m512 s2 = _mm512_castsi512_ps(_mm512_sub_epi32(e, g));
+ const __m512 b =
+ _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f),
+ _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x));
const __mmask16 d =
_mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(192), _CMP_GT_OQ);
- return _mm512_mask_blend_ps(
- d, _mm512_mask_blend_ps(
- c, _mm512_fmadd_ps(k, j, k),
- _mm512_mul_ps(_mm512_fmadd_ps(s2, j, s2), s1)),
- _mm512_mul_ps(s1, s1));
+ const __m512 u = _mm512_mul_ps(b, b);
+ const __m512 j = _mm512_fmadd_ps(
+ _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b,
+ _mm512_set1_ps(0x1.573e2ep-5f)),
+ u,
+ _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b,
+ _mm512_set1_ps(0x1.fffdb6p-2f))),
+ u,
+ _mm512_fmadd_ps(_mm512_set1_ps(0x1.ffffecp-1f), b, _mm512_set1_ps(1.0F)));
+ const __m512 res = _mm512_scalef_ps(j, n);
+ if (_mm512_kortestz(d, d))
+ return res;
+ const __m512 zero = _mm512_setzero_ps();
+ const __m512 alt = _mm512_mask_blend_ps(
+ _mm512_cmp_ps_mask(n, zero, _CMP_LE_OQ), _mm512_set1_ps(INFINITY), zero);
+ return _mm512_mask_blend_ps(d, res, alt);
}
// computes silu x/(1+exp(-x)) in single precision vector
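
The reworked AVX-512 ggml_v_expf drops the manual exponent reconstruction (the e/k/s1/s2 terms of the old version) in favour of _mm512_scalef_ps: the polynomial now approximates exp(b) directly (note the trailing +1.0f term) and is scaled by 2^n in one instruction, with only the |n| > 192 lanes patched to INFINITY or 0 afterwards. A scalar C++ model of the same algorithm, using the constants from the vector code and std::ldexp in place of scalef:

    #include <cmath>
    #include <cstdio>

    static float expf_sketch(float x) {
        const float n = float(std::rint(x * 0x1.715476p+0f));           // n = round(x * log2(e))
        const float b = x - n * 0x1.62e4p-1f - n * 0x1.7f7d1cp-20f;     // b = x - n*ln2 (hi + lo split)
        if (std::fabs(n) > 192.0f) {
            return n > 0.0f ? INFINITY : 0.0f;                          // overflow / underflow lanes
        }
        const float u = b * b;
        const float j = (0x1.0e4020p-7f * b + 0x1.573e2ep-5f) * u
                      + (0x1.555e66p-3f * b + 0x1.fffdb6p-2f);
        const float p = j * u + (0x1.ffffecp-1f * b + 1.0f);            // ~exp(b)
        return std::ldexp(p, int(n));                                   // scale by 2^n, like scalef
    }

    int main() {
        std::printf("%g %g %g\n", double(expf_sketch(0.0f)), double(expf_sketch(1.0f)), double(expf_sketch(-10.0f)));
    }
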
@@ -2883,24 +2888,20 @@ struct ggml_state {
// global state
static struct ggml_state g_state;
-static atomic_int g_state_barrier = 0;
+static atomic_flag g_state_critical = ATOMIC_FLAG_INIT;
// barrier via spin lock
inline static void ggml_critical_section_start(void) {
- int processing = atomic_fetch_add(&g_state_barrier, 1);
-
- while (processing > 0) {
- // wait for other threads to finish
- atomic_fetch_sub(&g_state_barrier, 1);
- sched_yield(); // TODO: reconsider this
- processing = atomic_fetch_add(&g_state_barrier, 1);
+ while (atomic_flag_test_and_set(&g_state_critical)) {
+ // spin
+ sched_yield();
}
}
// TODO: make this somehow automatically executed
// some sort of "sentry" mechanism
inline static void ggml_critical_section_end(void) {
- atomic_fetch_sub(&g_state_barrier, 1);
+ atomic_flag_clear(&g_state_critical);
}
#if defined(__gnu_linux__)
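
The global-state barrier becomes a plain test-and-set spinlock: ggml_critical_section_start() spins on atomic_flag_test_and_set() (yielding between attempts) and ggml_critical_section_end() simply clears the flag, with the Windows shim earlier in this diff mapping both onto InterlockedExchange. A minimal C++ sketch of the same pattern using std::atomic_flag, with a toy counter standing in for ggml's shared global state:

    #include <atomic>
    #include <cstdio>
    #include <thread>

    static std::atomic_flag g_state_critical = ATOMIC_FLAG_INIT;
    static int g_counter = 0;   // stand-in for the shared ggml global state

    static void critical_section_start() {
        while (g_state_critical.test_and_set(std::memory_order_acquire)) {
            std::this_thread::yield();   // same role as sched_yield() in ggml.c
        }
    }

    static void critical_section_end() {
        g_state_critical.clear(std::memory_order_release);
    }

    int main() {
        auto work = [] {
            for (int i = 0; i < 1000; ++i) {
                critical_section_start();
                ++g_counter;
                critical_section_end();
            }
        };
        std::thread t1(work), t2(work);
        t1.join(); t2.join();
        std::printf("%d\n", g_counter);   // always 2000
    }
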
@@ -3216,7 +3217,11 @@ GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor) {
tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
-static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * tensor) {
+GGML_CALL bool ggml_is_contiguous_0(const struct ggml_tensor * tensor) {
+ return ggml_is_contiguous(tensor);
+}
+
+GGML_CALL bool ggml_is_contiguous_1(const struct ggml_tensor * tensor) {
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
return
@@ -3225,6 +3230,14 @@ static inline bool ggml_is_contiguous_except_dim_1(const struct ggml_tensor * te
tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
}
+GGML_CALL bool ggml_is_contiguous_2(const struct ggml_tensor * tensor) {
+ static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
+
+ return
+ tensor->nb[0] == ggml_type_size(tensor->type) &&
+ tensor->nb[3] == tensor->nb[2]*tensor->ne[2];
+}
+
GGML_CALL bool ggml_is_permuted(const struct ggml_tensor * tensor) {
static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
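
ggml_is_contiguous_0/1/2 generalize the old ggml_is_contiguous_except_dim_1: as the new comments in ggml.h put it, _1 requires contiguity for dims >= 1 (so the row stride nb[1] may include padding) and _2 only for dims >= 2 (row and plane strides may be padded), while _0 is ordinary full contiguity. These are exactly the preconditions now asserted by the row-wise ops (norm, RMS norm, GELU, SiLU) and checked by the batched matmul paths. The toy C++ sketch below spells the three predicates out for a float tensor, with a simplified stand-in for ggml_tensor and following the upstream definitions:

    #include <cstdint>
    #include <cstdio>

    struct Tensor4D {
        int64_t ne[4];   // number of elements per dimension
        size_t  nb[4];   // strides in bytes, nb[0] is the element stride
    };

    static bool is_contiguous_0(const Tensor4D & t) {   // == ggml_is_contiguous
        return t.nb[0] == sizeof(float) &&
               t.nb[1] == t.nb[0] * size_t(t.ne[0]) &&
               t.nb[2] == t.nb[1] * size_t(t.ne[1]) &&
               t.nb[3] == t.nb[2] * size_t(t.ne[2]);
    }
    static bool is_contiguous_1(const Tensor4D & t) {   // contiguous for dims >= 1
        return t.nb[0] == sizeof(float) &&
               t.nb[2] == t.nb[1] * size_t(t.ne[1]) &&
               t.nb[3] == t.nb[2] * size_t(t.ne[2]);
    }
    static bool is_contiguous_2(const Tensor4D & t) {   // contiguous for dims >= 2
        return t.nb[0] == sizeof(float) &&
               t.nb[3] == t.nb[2] * size_t(t.ne[2]);
    }

    int main() {
        // 8x4x2x1 tensor whose rows are padded to 10 floats: fails _0, passes _1 and _2
        Tensor4D padded = {{8, 4, 2, 1}, {4, 40, 160, 320}};
        std::printf("%d %d %d\n", is_contiguous_0(padded), is_contiguous_1(padded), is_contiguous_2(padded));
    }
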
@@ -6392,6 +6405,16 @@ struct ggml_tensor * ggml_rope_custom_inplace(
);
}
+struct ggml_tensor * ggml_rope_xpos_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ float base,
+ bool down) {
+ return ggml_rope_impl(ctx, a, b, NULL, n_dims, 0, 0, 0, 10000.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, base, down, true);
+}
+
// ggml_rope_back
struct ggml_tensor * ggml_rope_back(
@@ -11008,7 +11031,7 @@ static void ggml_compute_forward_concat_f32(
static void ggml_compute_forward_concat(
const struct ggml_compute_params * params,
- struct ggml_tensor* dst) {
+ struct ggml_tensor * dst) {
const struct ggml_tensor * src0 = dst->src[0];
@@ -11401,8 +11424,8 @@ static void ggml_compute_forward_gelu_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+ GGML_ASSERT(ggml_is_contiguous_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11464,8 +11487,8 @@ static void ggml_compute_forward_gelu_quick_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+ GGML_ASSERT(ggml_is_contiguous_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11527,8 +11550,8 @@ static void ggml_compute_forward_silu_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+ GGML_ASSERT(ggml_is_contiguous_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11639,9 +11662,9 @@ static void ggml_compute_forward_silu_back_f32(
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * grad = dst->src[1];
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(grad));
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(src0));
- GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
+ GGML_ASSERT(ggml_is_contiguous_1(grad));
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+ GGML_ASSERT(ggml_is_contiguous_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_are_same_shape(src0, grad));
@@ -14339,7 +14362,7 @@ static void ggml_compute_forward_rope_f32(
int ir = 0;
const float theta_scale = powf(freq_base, -2.0f/n_dims);
- const float inv_ndims = -1.f/n_dims;
+
float corr_dims[2];
ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
@@ -14388,7 +14411,7 @@ static void ggml_compute_forward_rope_f32(
const float cos_block_theta = cosf(block_theta);
const float sin_block_theta = sinf(block_theta) * sin_sign;
- theta_base *= theta_scale;
+ theta_base *= theta_scale;
block_theta *= theta_scale;
const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
@@ -14423,29 +14446,22 @@ static void ggml_compute_forward_rope_f32(
dst_data[1] = x0*sin_theta*zeta + x1*cos_theta*zeta;
}
} else {
- // TODO: this might be wrong for ne0 != n_dims - need double check
- // it seems we have to rope just the first n_dims elements and do nothing with the rest
- // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
- theta_base *= freq_scale;
+ // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
for (int64_t ic = 0; ic < ne0; ic += 2) {
if (ic < n_dims) {
- const int64_t ib = 0;
+ const int64_t i0 = ic/2;
- // simplified from `(ib * n_dims + ic) * inv_ndims`
- float cur_rot = inv_ndims * ic - ib;
- float freq_factor = freq_factors ? freq_factors[ic/2] : 1.0f;
+ const float freq_factor = freq_factors ? freq_factors[i0] : 1.0f;
float cos_theta, sin_theta;
rope_yarn(
- theta_base/freq_factor, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
+ theta_base/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor,
&cos_theta, &sin_theta
);
- sin_theta *= sin_sign;
+ sin_theta *= sin_sign;
theta_base *= theta_scale;
- const int64_t i0 = ib*n_dims + ic/2;
-
const float * const src = (float *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
float * dst_data = (float *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
@@ -14524,7 +14540,7 @@ static void ggml_compute_forward_rope_f16(
int ir = 0;
const float theta_scale = powf(freq_base, -2.0f/n_dims);
- const float inv_ndims = -1.f/n_dims;
+
float corr_dims[2];
ggml_rope_yarn_corr_dims(n_dims, n_orig_ctx, freq_base, beta_fast, beta_slow, corr_dims);
@@ -14573,7 +14589,7 @@ static void ggml_compute_forward_rope_f16(
const float cos_block_theta = cosf(block_theta);
const float sin_block_theta = sinf(block_theta) * sin_sign;
- theta_base *= theta_scale;
+ theta_base *= theta_scale;
block_theta *= theta_scale;
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
@@ -14604,29 +14620,22 @@ static void ggml_compute_forward_rope_f16(
dst_data[1] = GGML_FP32_TO_FP16(x0*sin_theta + x1*cos_theta);
}
} else {
- // TODO: this might be wrong for ne0 != n_dims - need double check
- // it seems we have to rope just the first n_dims elements and do nothing with the rest
- // ref: https://github.com/ml-explore/mlx/blob/dc2edc762c797e3b8de50b1dad4dc0a131691033/benchmarks/python/llama_jax_bench.py#L11-L26
- theta_base *= freq_scale;
+ // ref: https://github.com/jquesnelle/yarn/blob/master/scaled_rope/LlamaYaRNScaledRotaryEmbedding.py
for (int64_t ic = 0; ic < ne0; ic += 2) {
if (ic < n_dims) {
- const int64_t ib = 0;
+ const int64_t i0 = ic/2;
- // simplified from `(ib * n_dims + ic) * inv_ndims`
- float cur_rot = inv_ndims * ic - ib;
- float freq_factor = freq_factors ? freq_factors[ic/2] : 1.0f;
+ const float freq_factor = freq_factors ? freq_factors[i0] : 1.0f;
float cos_theta, sin_theta;
rope_yarn(
- theta_base/freq_factor, freq_scale, corr_dims, cur_rot, ext_factor, attn_factor,
+ theta_base/freq_factor, freq_scale, corr_dims, ic, ext_factor, attn_factor,
&cos_theta, &sin_theta
);
- sin_theta *= sin_sign;
+ sin_theta *= sin_sign;
theta_base *= theta_scale;
- const int64_t i0 = ib*n_dims + ic/2;
-
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
ggml_fp16_t * dst_data = (ggml_fp16_t *)((char *) dst->data + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
@@ -22797,6 +22806,14 @@ int ggml_cpu_has_sycl(void) {
#endif
}
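+// returns 1 if ggml was built with the RPC backend (GGML_USE_RPC), 0 otherwise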
+int ggml_cpu_has_rpc(void) {
+#if defined(GGML_USE_RPC)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
int ggml_cpu_has_gpublas(void) {
return ggml_cpu_has_cuda() || ggml_cpu_has_clblast() || ggml_cpu_has_vulkan() || ggml_cpu_has_kompute() ||
ggml_cpu_has_sycl();
diff --git a/ggml.h b/ggml.h
index bdf05a311..9df601e2c 100644
--- a/ggml.h
+++ b/ggml.h
@@ -756,7 +756,6 @@ extern "C" {
GGML_API enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype);
GGML_API GGML_CALL bool ggml_is_transposed(const struct ggml_tensor * tensor);
- GGML_API GGML_CALL bool ggml_is_contiguous(const struct ggml_tensor * tensor);
GGML_API GGML_CALL bool ggml_is_permuted (const struct ggml_tensor * tensor);
GGML_API GGML_CALL bool ggml_is_empty (const struct ggml_tensor * tensor);
GGML_API bool ggml_is_scalar (const struct ggml_tensor * tensor);
@@ -765,6 +764,11 @@ extern "C" {
GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor);
GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars
+ GGML_API GGML_CALL bool ggml_is_contiguous (const struct ggml_tensor * tensor);
+ GGML_API GGML_CALL bool ggml_is_contiguous_0(const struct ggml_tensor * tensor); // same as ggml_is_contiguous()
+ GGML_API GGML_CALL bool ggml_is_contiguous_1(const struct ggml_tensor * tensor); // contiguous for dims >= 1
+ GGML_API GGML_CALL bool ggml_is_contiguous_2(const struct ggml_tensor * tensor); // contiguous for dims >= 2
+
GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1);
GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1);
@@ -1548,6 +1552,14 @@ extern "C" {
float beta_slow),
"use ggml_rope_ext_inplace instead");
+ struct ggml_tensor * ggml_rope_xpos_inplace(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int n_dims,
+ float base,
+ bool down);
+
// compute correction dims for YaRN RoPE scaling
GGML_CALL void ggml_rope_yarn_corr_dims(
int n_dims, int n_orig_ctx, float freq_base, float beta_fast, float beta_slow, float dims[2]);
@@ -2418,6 +2430,7 @@ extern "C" {
GGML_API int ggml_cpu_has_sse3 (void);
GGML_API int ggml_cpu_has_ssse3 (void);
GGML_API int ggml_cpu_has_sycl (void);
+ GGML_API int ggml_cpu_has_rpc (void);
GGML_API int ggml_cpu_has_vsx (void);
GGML_API int ggml_cpu_has_matmul_int8(void);
diff --git a/ggml_vk_generate_shaders.py b/ggml_vk_generate_shaders.py
index a8f7373df..7c85ca7ba 100644
--- a/ggml_vk_generate_shaders.py
+++ b/ggml_vk_generate_shaders.py
@@ -2670,14 +2670,12 @@ void main() {
const uint i = row*p.ncols + ib*p.ndims + ic/2;
const uint i2 = row/p.p_delta_rows;
- const float cur_rot = p.inv_ndims * ic - ib;
-
const int pos = data_b[i2];
const float freq_factor = p.has_freq_facs != 0 ? data_freq_factors[ic/2] : 1.0f;
const float theta_base = pos*p.freq_scale*pow(p.theta_scale, col/2.0f) / freq_factor;
float cos_theta, sin_theta;
- rope_yarn(theta_base, uint(cur_rot), cos_theta, sin_theta);
+ rope_yarn(theta_base, ic, cos_theta, sin_theta);
const float x0 = float(data_a[i + 0]);
const float x1 = float(data_a[i + p.ndims/2]);
diff --git a/gguf-py/gguf/vocab.py b/gguf-py/gguf/vocab.py
index 3ba99be4f..dc5749913 100644
--- a/gguf-py/gguf/vocab.py
+++ b/gguf-py/gguf/vocab.py
@@ -1,10 +1,15 @@
from __future__ import annotations
+import re
import logging
import json
import os
from pathlib import Path
-from typing import Any, Callable, Sequence, Mapping, Iterable
+from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable
+
+from sentencepiece import SentencePieceProcessor
+
+import gguf
from .gguf_writer import GGUFWriter
@@ -163,3 +168,298 @@ class SpecialVocab:
for typ in self.special_token_types:
self._set_special_token(typ, config.get(f'{typ}_token_id'))
return True
+
+
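+# vocab interfaces below are runtime_checkable Protocols, so isinstance() checks against them work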
+@runtime_checkable
+class BaseVocab(Protocol):
+ tokenizer_model: ClassVar[str]
+ name: ClassVar[str]
+
+
+@runtime_checkable
+class Vocab(BaseVocab, Protocol):
+ vocab_size: int
+ added_tokens_dict: dict[str, int]
+ added_tokens_list: list[str]
+ fname_tokenizer: Path
+
+ def __init__(self, base_path: Path): ...
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]: ...
+
+
+class NoVocab(BaseVocab):
+ tokenizer_model = "no_vocab"
+ name = "no_vocab"
+
+ def __repr__(self) -> str:
+ return ""
+
+
+class BpeVocab(Vocab):
+ tokenizer_model = "gpt2"
+ name = "bpe"
+
+ def __init__(self, base_path: Path):
+ added_tokens: dict[str, int] = {}
+
+ if (fname_tokenizer := base_path / 'vocab.json').exists():
+ # "slow" tokenizer
+ with open(fname_tokenizer, encoding="utf-8") as f:
+ self.vocab = json.load(f)
+
+ try:
+ # FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
+ with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ added_tokens = json.load(f)
+ except FileNotFoundError:
+ pass
+ else:
+ # "fast" tokenizer
+ fname_tokenizer = base_path / 'tokenizer.json'
+
+ # if this fails, FileNotFoundError propagates to caller
+ with open(fname_tokenizer, encoding="utf-8") as f:
+ tokenizer_json = json.load(f)
+
+ tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ if (
+ tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
+ or tokenizer_json['decoder']['type'] != 'ByteLevel'
+ ):
+ raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')
+
+ self.vocab = tokenizer_model["vocab"]
+
+ if (added := tokenizer_json.get('added_tokens')) is not None:
+ # Added tokens here can be duplicates of the main vocabulary.
+ added_tokens = {item['content']: item['id']
+ for item in added
+ if item['content'] not in self.vocab}
+
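+ # added token IDs must form a contiguous range directly after the base vocabulary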
+ vocab_size = len(self.vocab)
+ expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
+ actual_ids = sorted(added_tokens.values())
+ if expected_ids != actual_ids:
+ expected_end_id = vocab_size + len(actual_ids) - 1
+ raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
+ f"{vocab_size} - {expected_end_id}; got {actual_ids}")
+
+ items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [text for (text, idx) in items]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
+
+ def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}
+
+ for i, _ in enumerate(self.vocab):
+ yield reverse_vocab[i], 0.0, gguf.TokenType.NORMAL
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ score = -1000.0
+ yield text.encode("utf-8"), score, gguf.TokenType.CONTROL
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.bpe_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f""
+
+
+class SentencePieceVocab(Vocab):
+ tokenizer_model = "llama"
+ name = "spm"
+
+ def __init__(self, base_path: Path):
+ added_tokens: dict[str, int] = {}
+ if (fname_tokenizer := base_path / 'tokenizer.model').exists():
+ # normal location
+ try:
+ with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ added_tokens = json.load(f)
+ except FileNotFoundError:
+ pass
+ elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
+ # not found in alternate location either
+ raise FileNotFoundError('Cannot find tokenizer.model')
+
+ self.sentencepiece_tokenizer = SentencePieceProcessor()
+ self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
+ vocab_size = self.sentencepiece_tokenizer.vocab_size()
+
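+ # only entries with IDs beyond the base vocabulary are treated as newly added tokens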
+ new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
+ expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
+ actual_new_ids = sorted(new_tokens.keys())
+
+ if expected_new_ids != actual_new_ids:
+ raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")
+
+ # Token pieces that were added to the base vocabulary.
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
+
+ def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ tokenizer = self.sentencepiece_tokenizer
+ for i in range(tokenizer.vocab_size()):
+ piece = tokenizer.IdToPiece(i)
+ text = piece.encode("utf-8")
+ score: float = tokenizer.GetScore(i)
+
+ toktype = gguf.TokenType.NORMAL
+ if tokenizer.IsUnknown(i):
+ toktype = gguf.TokenType.UNKNOWN
+ if tokenizer.IsControl(i):
+ toktype = gguf.TokenType.CONTROL
+
+ # NOTE: I think added_tokens are user defined.
+ # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
+ # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
+
+ if tokenizer.IsUnused(i):
+ toktype = gguf.TokenType.UNUSED
+ if tokenizer.IsByte(i):
+ toktype = gguf.TokenType.BYTE
+
+ yield text, score, toktype
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ score = -1000.0
+ yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.sentencepiece_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f""
+
+
+class LlamaHfVocab(Vocab):
+ tokenizer_model = "llama"
+ name = "hfft"
+
+ def __init__(self, base_path: Path):
+ fname_tokenizer = base_path / 'tokenizer.json'
+ # if this fails, FileNotFoundError propagates to caller
+ with open(fname_tokenizer, encoding='utf-8') as f:
+ tokenizer_json = json.load(f)
+
+ # pre-check so we know if we need transformers
+ tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ is_llama3 = (
+ tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
+ and not tokenizer_model.get('byte_fallback', True)
+ )
+ if is_llama3:
+ raise TypeError('Llama 3 must be converted with BpeVocab')
+
+ if not is_llama3 and (
+ tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
+ or tokenizer_json['decoder']['type'] != 'Sequence'
+ ):
+ raise FileNotFoundError('Cannot find Llama BPE tokenizer')
+
+ try:
+ from transformers import AutoTokenizer
+ except ImportError as e:
+ raise ImportError(
+ "To use LlamaHfVocab, please install the `transformers` package. "
+ "You can install it with `pip install transformers`."
+ ) from e
+
+ # Allow the tokenizer to default to slow or fast versions.
+ # Explicitly set tokenizer to use local paths.
+ self.tokenizer = AutoTokenizer.from_pretrained(
+ base_path,
+ cache_dir=base_path,
+ local_files_only=True,
+ )
+ assert self.tokenizer.is_fast # assume tokenizer.json is used
+
+ # Initialize lists and dictionaries for added tokens
+ self.added_tokens_list = []
+ self.added_tokens_dict = dict()
+ self.added_tokens_ids = set()
+
+ # Process added tokens
+ for tok, tokidx in sorted(
+ self.tokenizer.get_added_vocab().items(), key=lambda x: x[1]
+ ):
+ # Only consider added tokens that are not in the base vocabulary
+ if tokidx >= self.tokenizer.vocab_size:
+ self.added_tokens_list.append(tok)
+ self.added_tokens_dict[tok] = tokidx
+ self.added_tokens_ids.add(tokidx)
+
+ # Store special tokens and their IDs
+ self.specials = {
+ tok: self.tokenizer.get_vocab()[tok]
+ for tok in self.tokenizer.all_special_tokens
+ }
+ self.special_ids = set(self.tokenizer.all_special_ids)
+
+ # Set vocabulary sizes
+ self.vocab_size_base = self.tokenizer.vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+
+ self.fname_tokenizer = fname_tokenizer
+
+ def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ reverse_vocab = {
+ id: encoded_tok for encoded_tok, id in self.tokenizer.get_vocab().items()
+ }
+
+ for token_id in range(self.vocab_size_base):
+ # Skip processing added tokens here
+ if token_id in self.added_tokens_ids:
+ continue
+
+ # Convert token text to bytes
+ token_text = reverse_vocab[token_id].encode("utf-8")
+
+ # Yield token text, score, and type
+ yield token_text, self.get_token_score(token_id), self.get_token_type(
+ token_id, token_text, self.special_ids # Reuse already stored special IDs
+ )
+
+ def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
+ # Special case for byte tokens
+ if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
+ return gguf.TokenType.BYTE
+
+ # Determine token type based on whether it's a special token
+ return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
+
+ def get_token_score(self, token_id: int) -> float:
+ # Placeholder for actual logic to determine the token's score
+ # This needs to be implemented based on specific requirements
+ return -1000.0 # Default score
+
+ def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ if text in self.specials:
+ toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
+ score = self.get_token_score(self.specials[text])
+ else:
+ toktype = gguf.TokenType.USER_DEFINED
+ score = -1000.0
+
+ yield text.encode("utf-8"), score, toktype
+
+ def has_newline_token(self):
+ return "<0x0A>" in self.tokenizer.vocab or "\n" in self.tokenizer.vocab
+
+ def all_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
+ yield from self.hf_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f""
diff --git a/gguf-py/scripts/gguf-new-metadata.py b/gguf-py/scripts/gguf-new-metadata.py
index c9f1927f6..21e91180c 100755
--- a/gguf-py/scripts/gguf-new-metadata.py
+++ b/gguf-py/scripts/gguf-new-metadata.py
@@ -144,6 +144,7 @@ def main() -> None:
parser.add_argument("--general-description", type=str, help="The models general.description", metavar='"Description ..."')
parser.add_argument("--chat-template", type=str, help="Chat template string (or JSON string containing templates)", metavar='"{% ... %} ..."')
parser.add_argument("--chat-template-config", type=Path, help="Config file containing chat template(s)", metavar='tokenizer_config.json')
+ parser.add_argument("--pre-tokenizer", type=str, help="The models tokenizer.ggml.pre", metavar='"pre tokenizer"')
parser.add_argument("--remove-metadata", action="append", type=str, help="Remove metadata (by key name) from output model", metavar='general.url')
parser.add_argument("--special-token", action="append", type=str, help="Special token by value", nargs=2, metavar=(' | '.join(token_names.keys()), '""'))
parser.add_argument("--special-token-by-id", action="append", type=str, help="Special token by id", nargs=2, metavar=(' | '.join(token_names.keys()), '0'))
@@ -172,6 +173,9 @@ def main() -> None:
if template:
new_metadata[gguf.Keys.Tokenizer.CHAT_TEMPLATE] = MetadataDetails(gguf.GGUFValueType.STRING, template)
+ if args.pre_tokenizer:
+ new_metadata[gguf.Keys.Tokenizer.PRE] = MetadataDetails(gguf.GGUFValueType.STRING, args.pre_tokenizer)
+
if remove_metadata:
logger.warning('*** Warning *** Warning *** Warning **')
logger.warning('* Most metadata is required for a fully functional GGUF file,')
diff --git a/llama.cpp b/llama.cpp
index ca64b7e29..6878bc893 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1738,12 +1738,13 @@ struct llama_mlock {
};
using llama_mlocks = std::vector<std::unique_ptr<llama_mlock>>;
-static std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
+// NOTE: avoid ever using this except for building the token_to_piece caches
+static std::string llama_token_to_piece(const struct llama_model * model, llama_token token, bool special) {
std::vector<char> result(8, 0);
- const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
+ const int n_tokens = llama_token_to_piece(model, token, result.data(), result.size(), special);
if (n_tokens < 0) {
result.resize(-n_tokens);
- int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
+ int check = llama_token_to_piece(model, token, result.data(), result.size(), special);
GGML_ASSERT(check == -n_tokens);
}
else {
@@ -2946,7 +2947,9 @@ struct llama_vocab {
std::unordered_map<token, id> token_to_id;
std::vector<token_data> id_to_token;
- std::unordered_map<token, id> special_tokens_cache;
+ std::vector<id> cache_special_tokens;
+ std::vector<token> cache_token_to_piece; // llama_token_to_piece(special = false);
+ std::vector<token> cache_token_to_piece_special; // llama_token_to_piece(special = true);
std::map<std::pair<std::string, std::string>, int> bpe_ranks;
@@ -6046,20 +6049,14 @@ static void llm_load_vocab(
vocab.special_cls_id = 101;
vocab.special_mask_id = 103;
vocab.add_space_prefix = false;
- } else {
- if (tokenizer_model == "gpt2") {
- vocab.type = LLAMA_VOCAB_TYPE_BPE;
+ } else if (tokenizer_model == "gpt2") {
+ vocab.type = LLAMA_VOCAB_TYPE_BPE;
- const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
- if (add_space_prefix_keyidx != -1) {
- vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
- }
- } else {
- LLAMA_LOG_WARN("%s: unknown tokenizer: '%s'", __func__, tokenizer_model.c_str());
- LLAMA_LOG_WARN("%s: using default tokenizer: 'llama'", __func__);
- vocab.type = LLAMA_VOCAB_TYPE_SPM;
- return;
+ const int add_space_prefix_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_ADD_PREFIX).c_str());
+ if (add_space_prefix_keyidx != -1) {
+ vocab.add_space_prefix = gguf_get_val_bool(ctx, add_space_prefix_keyidx);
}
+
// read bpe merges and populate bpe ranks
const int merges_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_MERGES).c_str());
if (merges_keyidx == -1) {
@@ -6093,6 +6090,8 @@ static void llm_load_vocab(
vocab.special_pad_id = -1;
vocab.special_cls_id = -1;
vocab.special_mask_id = -1;
+ } else {
+ throw std::runtime_error(format("unknown tokenizer: '%s'", tokenizer_model.c_str()));
}
// for now, only BPE models have pre-tokenizers
@@ -6285,97 +6284,40 @@ static void llm_load_vocab(
// build special tokens cache
{
- // TODO: It is unclear (to me) at this point, whether special tokes are guaranteed to be of a deterministic type,
- // and will always be correctly labeled in 'added_tokens.json' etc.
- // The assumption is, since special tokens aren't meant to be exposed to end user, they are designed
- // to be unmatchable by the tokenizer, therefore tokens from the vocab, which are unmatchable by the tokenizer
- // are special tokens.
- // From testing, this appears to correlate 1:1 with special tokens.
- //
-
- // Counting special tokens and verifying in only one direction
- // is sufficient to detect difference in those two sets.
- //
- uint32_t special_tokens_count_by_type = 0;
- uint32_t special_tokens_count_from_verification = 0;
-
- bool special_tokens_definition_mismatch = false;
-
- for (const auto & t : vocab.token_to_id) {
- const auto & token = t.first;
- const auto & id = t.second;
-
- // Count all non-normal tokens in the vocab while iterating
+ for (llama_vocab::id id = 0; id < (llama_vocab::id)n_vocab; ++id) {
if (vocab.id_to_token[id].type != LLAMA_TOKEN_TYPE_NORMAL) {
- special_tokens_count_by_type++;
- }
-
- // Skip single character tokens
- if (token.length() > 1) {
- bool is_tokenizable = false;
-
- // Split token string representation in two, in all possible ways
- // and check if both halves can be matched to a valid token
- for (unsigned i = 1; i < token.length();) {
- const auto left = token.substr(0, i);
- const auto right = token.substr(i);
-
- // check if we didnt partition in the middle of a utf sequence
- auto utf = utf8_len(left.at(left.length() - 1));
-
- if (utf == 1) {
- if (vocab.token_to_id.find(left) != vocab.token_to_id.end() &&
- vocab.token_to_id.find(right) != vocab.token_to_id.end() ) {
- is_tokenizable = true;
- break;
- }
- i++;
- } else {
- // skip over the rest of multibyte utf sequence
- i += utf - 1;
- }
- }
-
- if (!is_tokenizable) {
- // Some tokens are multibyte, but they are utf sequences with equivalent text length of 1
- // it's faster to re-filter them here, since there are way less candidates now
-
- // Calculate a total "utf" length of a token string representation
- size_t utf8_str_len = 0;
- for (unsigned i = 0; i < token.length();) {
- utf8_str_len++;
- i += utf8_len(token.at(i));
- }
-
- // And skip the ones which are one character
- if (utf8_str_len > 1) {
- // At this point what we have left are special tokens only
- vocab.special_tokens_cache[token] = id;
-
- // Count manually found special tokens
- special_tokens_count_from_verification++;
-
- // If this manually found special token is not marked as such, flag a mismatch
- if (vocab.id_to_token[id].type == LLAMA_TOKEN_TYPE_NORMAL) {
- special_tokens_definition_mismatch = true;
- }
- }
- }
+ vocab.cache_special_tokens.push_back(id);
}
}
- if (special_tokens_definition_mismatch || special_tokens_count_from_verification != special_tokens_count_by_type) {
- LLAMA_LOG_WARN("%s: mismatch in special tokens definition ( %u/%zu vs %u/%zu ).\n",
- __func__,
- special_tokens_count_from_verification, vocab.id_to_token.size(),
- special_tokens_count_by_type, vocab.id_to_token.size()
- );
- } else {
- LLAMA_LOG_INFO("%s: special tokens definition check successful ( %u/%zu ).\n",
- __func__,
- special_tokens_count_from_verification, vocab.id_to_token.size()
- );
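+ // keep the special-token cache sorted by token text length, longest first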
+ std::sort( vocab.cache_special_tokens.begin(), vocab.cache_special_tokens.end(),
+ [&] (const llama_vocab::id a, const llama_vocab::id b) {
+ return vocab.id_to_token[a].text.size() > vocab.id_to_token[b].text.size();
+ }
+ );
+
+ LLAMA_LOG_INFO("%s: special tokens cache size = %u\n", __func__, (uint32_t)vocab.cache_special_tokens.size());
+ }
+
+ // build token to piece caches
+ {
+ size_t size_cache = 0;
+
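+ // convert every token to its text piece once with special = false and once with special = true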
+ std::vector<llama_vocab::token> cache_token_to_piece (n_vocab);
+ std::vector<llama_vocab::token> cache_token_to_piece_special(n_vocab);
+
+ for (uint32_t id = 0; id < n_vocab; ++id) {
+ cache_token_to_piece[id] = llama_token_to_piece(&model, id, false);
+ cache_token_to_piece_special[id] = llama_token_to_piece(&model, id, true);
+
+ size_cache += cache_token_to_piece[id].size();
+ size_cache += cache_token_to_piece_special[id].size();
}
+
+ std::swap(vocab.cache_token_to_piece, cache_token_to_piece);
+ std::swap(vocab.cache_token_to_piece_special, cache_token_to_piece_special);
+
+ LLAMA_LOG_INFO("%s: token to piece cache size = %.4f MB\n", __func__, size_cache / 1024.0 / 1024.0);
}
}
@@ -12973,46 +12915,69 @@ struct llm_build_context {
}
// split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, ggml_element_size(q) * hparams.n_embd_head_k, ggml_element_size(q) * hparams.n_embd_head_k * n_head, 0);
+ struct ggml_tensor * q_nope = ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens,
+ ggml_row_size(q->type, hparams.n_embd_head_k),
+ ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
+ 0);
cb(q_nope, "q_nope", il);
+
// and {n_head * n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, ggml_element_size(q) * hparams.n_embd_head_k, ggml_element_size(q) * hparams.n_embd_head_k * n_head, ggml_element_size(q) * n_embd_head_qk_nope);
+ struct ggml_tensor * q_pe = ggml_view_3d(ctx0, q, n_embd_head_qk_rope, n_head, n_tokens,
+ ggml_row_size(q->type, hparams.n_embd_head_k),
+ ggml_row_size(q->type, hparams.n_embd_head_k * n_head),
+ ggml_row_size(q->type, n_embd_head_qk_nope));
cb(q_pe, "q_pe", il);
// {n_embd, kv_lora_rank + n_embd_head_qk_rope} * {n_embd, n_tokens} -> {kv_lora_rank + n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * compressed_kv_pe = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
- cb(compressed_kv_pe, "compressed_kv_pe", il);
+ struct ggml_tensor * kv_pe_compresseed = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
+ cb(kv_pe_compresseed, "kv_pe_compresseed", il);
// split into {kv_lora_rank, n_tokens}
- struct ggml_tensor * compressed_kv = ggml_view_2d(ctx0, compressed_kv_pe, kv_lora_rank, n_tokens, compressed_kv_pe->nb[1], 0);
- cb(compressed_kv, "compressed_kv", il);
+ struct ggml_tensor * kv_compressed = ggml_view_2d(ctx0, kv_pe_compresseed, kv_lora_rank, n_tokens,
+ kv_pe_compresseed->nb[1],
+ 0);
+ cb(kv_compressed, "kv_compressed", il);
+
// and {n_embd_head_qk_rope, n_tokens}
- struct ggml_tensor * k_pe = ggml_view_2d(ctx0, compressed_kv_pe, n_embd_head_qk_rope, n_tokens, compressed_kv_pe->nb[1], ggml_element_size(compressed_kv_pe)*kv_lora_rank);
+ struct ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_pe_compresseed, n_embd_head_qk_rope, 1, n_tokens,
+ kv_pe_compresseed->nb[1],
+ kv_pe_compresseed->nb[1],
+ ggml_row_size(kv_pe_compresseed->type, kv_lora_rank));
cb(k_pe, "k_pe", il);
- compressed_kv = llm_build_norm(ctx0, compressed_kv, hparams,
+ kv_compressed = ggml_cont(ctx0, kv_compressed); // TODO: the CUDA backend does not support non-contiguous norm
+ kv_compressed = llm_build_norm(ctx0, kv_compressed, hparams,
model.layers[il].attn_kv_a_norm, NULL,
LLM_NORM_RMS, cb, il);
- cb(compressed_kv, "compressed_kv", il);
+ cb(kv_compressed, "kv_compressed", il);
// {kv_lora_rank, n_head * (n_embd_head_qk_nope + n_embd_head_v)} * {kv_lora_rank, n_tokens} -> {n_head * (n_embd_head_qk_nope + n_embd_head_v), n_tokens}
- struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, compressed_kv);
+ struct ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_compressed);
cb(kv, "kv", il);
// split into {n_head * n_embd_head_qk_nope, n_tokens}
- struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens, ggml_element_size(kv) * (n_embd_head_qk_nope + hparams.n_embd_head_v), ggml_element_size(kv) * n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v), 0);
+ struct ggml_tensor * k_nope = ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
+ ggml_row_size(kv->type, n_embd_head_qk_nope + hparams.n_embd_head_v),
+ ggml_row_size(kv->type, n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v)),
+ 0);
cb(k_nope, "k_nope", il);
// and {n_head * n_embd_head_v, n_tokens}
- struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens, ggml_element_size(kv) * (n_embd_head_qk_nope + hparams.n_embd_head_v), ggml_element_size(kv) * n_head * (n_embd_head_qk_nope + hparams.n_embd_head_v), ggml_element_size(kv) * n_embd_head_qk_nope);
+ struct ggml_tensor * v_states = ggml_view_3d(ctx0, kv, hparams.n_embd_head_v, n_head, n_tokens,
+ ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)),
+ ggml_row_size(kv->type, (n_embd_head_qk_nope + hparams.n_embd_head_v)*n_head),
+ ggml_row_size(kv->type, (n_embd_head_qk_nope)));
cb(v_states, "v_states", il);
v_states = ggml_cont(ctx0, v_states);
cb(v_states, "v_states", il);
- v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens, ggml_element_size(kv) * hparams.n_embd_head_v * n_head, 0);
+ v_states = ggml_view_2d(ctx0, v_states, hparams.n_embd_head_v * n_head, n_tokens,
+ ggml_row_size(kv->type, hparams.n_embd_head_v * n_head),
+ 0);
cb(v_states, "v_states", il);
+ q_pe = ggml_cont(ctx0, q_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
q_pe = ggml_rope_ext(
ctx0, q_pe, inp_pos, nullptr,
n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
@@ -13021,8 +12986,9 @@ struct llm_build_context {
cb(q_pe, "q_pe", il);
// shared RoPE key
+ k_pe = ggml_cont(ctx0, k_pe); // TODO: the CUDA backend does not support non-contiguous RoPE
k_pe = ggml_rope_ext(
- ctx0, ggml_view_3d(ctx0, k_pe, n_embd_head_qk_rope, 1, n_tokens, k_pe->nb[0], k_pe->nb[1], 0), inp_pos, nullptr,
+ ctx0, k_pe, inp_pos, nullptr,
n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
ext_factor, attn_factor_scaled, beta_fast, beta_slow
);
@@ -14785,7 +14751,7 @@ struct llm_tokenizer_wpm {
llm_tokenizer_wpm(const llama_vocab & vocab): vocab(vocab) {}
void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
- auto * token_map = &vocab.token_to_id;
+ const auto & token_map = vocab.token_to_id;
// normalize and split by whitespace
std::vector<std::string> words = preprocess(text);
@@ -14800,108 +14766,89 @@ struct llm_tokenizer_wpm {
}
// prepend phantom space
- std::string word1 = "\xe2\x96\x81" + word;
- int n = word1.size();
+ const std::string word1 = "\xe2\x96\x81" + word;
+ const int n = word1.size();
+
+ const size_t current_tokens = output.size();
// we're at the start of a new word
- int i = 0;
- bool match_any = false;
-
// move through character position in word
- while (i < n) {
+ for (int i = 0; i < n; ++i) {
// loop through possible match length
bool match = false;
for (int j = n; j > i; j--) {
- auto it = token_map->find(word1.substr(i, j - i));
- if (it != token_map->end()) {
+ auto it = token_map.find(word1.substr(i, j - i));
+ if (it != token_map.end()) {
output.push_back(it->second);
match = true;
- match_any = true;
- i = j;
+ i = j - 1;
break;
}
}
- // must be an unknown character
- if (!match) {
- i++;
+ if (!match) { // discard all
+ output.resize(current_tokens);
+ break; // and discard next tokens
}
}
// we didn't find any matches for this word
- if (!match_any) {
+ if (current_tokens == output.size()) {
output.push_back(vocab.special_unk_id);
}
}
}
std::vector<std::string> preprocess(const std::string & text) {
- std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
+ const std::vector<uint32_t> cpts_nfd = unicode_cpts_normalize_nfd(unicode_cpts_from_utf8(text));
+ std::vector<std::string> words(1, "");
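+ // whitespace ends the current word; punctuation, ASCII symbols and CJK characters become single-character words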
- // strip accents, strip control, uniformize whitespace,
- // to lowercase, pad chinese characters, pad punctuation
- std::string new_str = "";
- for (uint32_t code : cpts_nfd) {
- const codepoint_flags flags = unicode_cpt_flags(code);
- if (flags.is_accent_mark || flags.is_control) {
+ for (const char32_t cpt : cpts_nfd) {
+ const auto flags = unicode_cpt_flags(cpt);
+
+ if (flags.is_whitespace) {
+ if (words.back().size()) { // finish previous word if any
+ words.emplace_back();
+ }
continue;
}
- code = unicode_tolower(code);
- if (flags.is_separator || flags.is_whitespace) { //####FIXME: is_separator ?
- code = ' ';
+
+ assert (!flags.is_separator);
+ if (cpt == 0 || cpt == 0xFFFD || flags.is_control) {
+ continue;
}
- std::string s = unicode_cpt_to_utf8(code);
- if (flags.is_punctuation || is_ascii_punct(code) || is_chinese_char(code)) {
- new_str += " ";
- new_str += s;
- new_str += " ";
+
+ const std::string s = unicode_cpt_to_utf8(unicode_tolower(cpt));
+ if (flags.is_punctuation || ( cpt < 0x7F && flags.is_symbol ) || is_chinese_char(cpt)) {
+ if (words.back().size()) { // finish previous word if any
+ words.emplace_back();
+ }
+ words.back() = s; // single char word
+ words.emplace_back(); // start a new word
} else {
- new_str += s;
+ words.back() += s; // append char to word
}
}
- // split by whitespace
- uint64_t l = 0;
- uint64_t r = 0;
- std::vector<std::string> words;
- while (r < new_str.size()) {
- // if is whitespace
- if (isspace(new_str[r], std::locale::classic())) {
- if (r > l) words.push_back(new_str.substr(l, (r - l)));
- l = r + 1;
- r = l;
- } else {
- r += 1;
- }
- }
- if (r > l) {
- words.push_back(new_str.substr(l, (r - l)));
+ if (!words.back().size()) {
+ words.pop_back();
}
+
return words;
}
- bool is_ascii_punct(uint32_t code) {
- if (code > 0xFF) {
- return false;
- }
- auto c = char(static_cast<unsigned char>(code));
- return ispunct(c, std::locale::classic());
- }
-
- bool is_chinese_char(uint32_t cpt) {
- if ((cpt >= 0x4E00 && cpt <= 0x9FFF) ||
- (cpt >= 0x3400 && cpt <= 0x4DBF) ||
+ static bool is_chinese_char(uint32_t cpt) {
+ return
+ (cpt >= 0x04E00 && cpt <= 0x09FFF) ||
+ (cpt >= 0x03400 && cpt <= 0x04DBF) ||
(cpt >= 0x20000 && cpt <= 0x2A6DF) ||
(cpt >= 0x2A700 && cpt <= 0x2B73F) ||
(cpt >= 0x2B740 && cpt <= 0x2B81F) ||
(cpt >= 0x2B920 && cpt <= 0x2CEAF) || // this should be 0x2B820 but in hf rust code it is 0x2B920
- (cpt >= 0xF900 && cpt <= 0xFAFF) ||
- (cpt >= 0x2F800 && cpt <= 0x2FA1F) ||
- (cpt >= 0x3000 && cpt <= 0x303F) ||
- (cpt >= 0xFF00 && cpt <= 0xFFEF)) {
- return true; // NOLINT
- }
- return false;
+ (cpt >= 0x0F900 && cpt <= 0x0FAFF) ||
+ (cpt >= 0x2F800 && cpt <= 0x2FA1F);
+ //(cpt >= 0x3000 && cpt <= 0x303F) ||
+ //(cpt >= 0xFF00 && cpt <= 0xFFEF);
}
const llama_vocab & vocab;
@@ -14945,9 +14892,8 @@ struct fragment_buffer_variant {
static void tokenizer_st_partition(const llama_vocab & vocab, std::forward_list<fragment_buffer_variant> & buffer) {
// for each special token
- for (const auto & st: vocab.special_tokens_cache) {
- const auto & special_token = st.first;
- const auto & special_id = st.second;
+ for (const llama_vocab::id special_id : vocab.cache_special_tokens) {
+ const auto & special_token = vocab.id_to_token[special_id].text;
// for each text fragment
std::forward_list