Compare commits

...

6 Commits

Author SHA1 Message Date
Andrei
3d9b37026e
Merge 951f1d9053 into dea5e86051 2024-10-31 13:56:23 +01:00
Diego Devesa
dea5e86051
ggml : check tensor name lengths in gguf files (#10100) 2024-10-31 11:40:59 +01:00
Sergio López
1329c0a75e
kompute: add mul_mat_q4_k shader (#10097)
This is a more or less direct translation from the Metal implementation
to GLSL.

Signed-off-by: Sergio Lopez <slp@redhat.com>
2024-10-31 11:09:52 +02:00
Sergio López
61408e7fad
kompute: add backend registry / device interfaces (#10045)
Get in line with the other backends by supporting the newer
backend/device registry interfaces.

Signed-off-by: Sergio Lopez <slp@redhat.com>
2024-10-30 17:01:52 +01:00
Andrei Betlen
951f1d9053 Merge remote-tracking branch 'origin' into add-support-for-phi3-vision 2024-08-27 18:13:54 -04:00
Andrei Betlen
dc0625ab8f Add support for Phi3-vision-instruct 2024-08-27 18:11:41 -04:00
9 changed files with 574 additions and 69 deletions

View File

@@ -136,6 +136,8 @@ static std::string format(const char * fmt, ...) {
#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
#define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s"
#define TN_IMAGE_NEWLINE "model.image_newline"
#define TN_SUB_GN "v.sub_gn"
#define TN_GLB_GN "v.glb_gn"
#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
#define TN_MINICPMV_QUERY "resampler.query"
@@ -534,6 +536,9 @@ struct clip_vision_model {
struct ggml_tensor * mm_model_ln_kv_b;
struct ggml_tensor * mm_model_ln_post_w;
struct ggml_tensor * mm_model_ln_post_b;
struct ggml_tensor * sub_gn;
struct ggml_tensor * glb_gn;
};
struct clip_ctx {
@@ -781,6 +786,138 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
// print_tensor_info(embeddings, "embeddings");
// phi-3.5-vision-instruct
if (model.sub_gn && model.glb_gn) {
// Phi3VisionEmbedding.hd_transform()
ggml_tensor * x = embeddings;
int num_images = batch_size;
int h_crop = 1, w_crop = 1;
int C = x->ne[0];
int L = x->ne[1];
int N = x->ne[2];
int H = (int)sqrt((float)L);
GGML_ASSERT(H * H == L);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
// Phi3ImageEmbedding.reshape_hd_patches_2x2merge()
x = ggml_reshape_4d(ctx0, x, N, H, H, C);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 0, 1, 2));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 2, 3, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 2, H / 2, 2, H / 2 * C * N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 1, 3, 2));
x = ggml_reshape_3d(ctx0, x, N * C * (H / 2), (H / 2), 4);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4, H / 2, H / 2, N * C);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4, (H / 2) * (H / 2), C, N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 3, 1, 2));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4 * C, H / 2, H / 2, N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, (H / 2) * 4 * C, (H / 2), w_crop, num_images * h_crop);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4 * C, w_crop * (H / 2), h_crop * (H / 2), num_images);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
ggml_tensor * global_image_features_hd = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
// Phi3ImageEmbedding.add_image_newline()
ggml_tensor * newline_embedding = model.sub_gn;
for (int i = 0; i < H/2-1; i++) {
newline_embedding = ggml_concat(ctx0, newline_embedding, model.sub_gn, 2);
}
ggml_tensor * global_image_features_hd_newline = ggml_concat(ctx0, global_image_features_hd, newline_embedding, 1);
global_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, global_image_features_hd_newline, 3, 2, 1, 0));
global_image_features_hd_newline = ggml_reshape_4d(ctx0, global_image_features_hd_newline, 1, 1, (w_crop*(H/2)+1) * h_crop*(H/2), 4*C);
global_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, global_image_features_hd_newline, 3, 2, 1, 0));
h_crop = image_size / 336;
w_crop = image_size / 336;
// sub_image_features_hd
x = embeddings;
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
// Phi3ImageEmbedding.reshape_hd_patches_2x2merge()
x = ggml_reshape_4d(ctx0, x, N, H, H, C);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 0, 1, 2));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 2, 3, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 2, H / 2, 2, H / 2 * C * N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 1, 3, 2));
x = ggml_reshape_3d(ctx0, x, N * C * (H / 2), (H / 2), 4);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4, H / 2, H / 2, N * C);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4, (H / 2) * (H / 2), C, N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 3, 1, 2));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4 * C, H / 2, H / 2, N);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, (H / 2) * 4 * C, (H / 2), w_crop, num_images * h_crop);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
x = ggml_reshape_4d(ctx0, x, 4 * C, w_crop * (H / 2), h_crop * (H / 2), num_images);
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
ggml_tensor * sub_image_features_hd = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
// Phi3ImageEmbedding.add_image_newline()
newline_embedding = model.sub_gn;
for (int i = 0; i < (H/2-1); i++) {
newline_embedding = ggml_concat(ctx0, newline_embedding, model.sub_gn, 2);
}
ggml_tensor * sub_image_features_hd_newline = ggml_concat(ctx0, sub_image_features_hd, newline_embedding, 1);
sub_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, sub_image_features_hd_newline, 3, 2, 1, 0));
sub_image_features_hd_newline = ggml_reshape_4d(ctx0, sub_image_features_hd_newline, 1, 1, (w_crop*(H/2)+1) * h_crop*(H/2), 4*C);
sub_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, sub_image_features_hd_newline, 3, 2, 1, 0));
embeddings = ggml_concat(ctx0, sub_image_features_hd_newline, model.glb_gn, 1);
embeddings = ggml_concat(ctx0, embeddings, global_image_features_hd_newline, 1);
}
// llava projector
if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
@@ -1406,6 +1543,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
// LOG_INF("%s: image_newline tensor (llava-1.6) found\n", __func__);
} catch (std::runtime_error & /*e*/) { }
try {
vision_model.sub_gn = get_tensor(new_clip->ctx_data, TN_SUB_GN);
vision_model.glb_gn = get_tensor(new_clip->ctx_data, TN_GLB_GN);
} catch (std::runtime_error & /*e*/) { }
} else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
// MobileVLM projection
vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
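As a sanity check on the hd_transform block above, the reshape and newline concatenation determine the final embedding count in closed form. A minimal standalone sketch (not part of the diff; it assumes the 336-pixel ViT tower implied by the image_size / 336 crop math, i.e. L = 576 patches, H = 24):

#include <cassert>
#include <cmath>
#include <cstdio>

// Hypothetical helper: rows produced by reshape_hd_patches_2x2merge() plus
// add_image_newline() for one crop grid. The 2x2 merge collapses H x H
// patches into h_crop*(H/2) rows of w_crop*(H/2) tokens, and one sub_gn
// "newline" token is appended per row -- giving the
// (w_crop*(H/2)+1) * h_crop*(H/2) shape seen in the reshapes above.
static int hd_transform_rows(int L, int h_crop, int w_crop) {
    const int H = (int) sqrtf((float) L);
    assert(H * H == L); // mirrors the GGML_ASSERT in the graph code
    return (w_crop * (H / 2) + 1) * h_crop * (H / 2);
}

int main() {
    const int L = 576;                                  // 24x24 patch grid
    const int global_rows = hd_transform_rows(L, 1, 1); // 156
    const int sub_rows    = hd_transform_rows(L, 2, 2); // 600, for a 672px input
    // final concat order in the graph: sub images, glb_gn separator, global image
    printf("total embeddings: %d\n", sub_rows + 1 + global_rows); // 757
    return 0;
}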

View File

@@ -11,6 +11,8 @@
extern "C" {
#endif
#define GGML_KOMPUTE_MAX_DEVICES 16
struct ggml_vk_device {
int index;
int type; // same as VkPhysicalDeviceType
@@ -41,6 +43,8 @@ GGML_API bool ggml_backend_is_kompute(ggml_backend_t backend);
GGML_API ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device);
GGML_API ggml_backend_reg_t ggml_backend_kompute_reg(void);
#ifdef __cplusplus
}
#endif

View File

@@ -800,6 +800,7 @@ if (GGML_KOMPUTE)
kompute-shaders/op_mul_mat_q8_0.comp
kompute-shaders/op_mul_mat_q4_0.comp
kompute-shaders/op_mul_mat_q4_1.comp
kompute-shaders/op_mul_mat_q4_k.comp
kompute-shaders/op_mul_mat_q6_k.comp
kompute-shaders/op_getrows_f32.comp
kompute-shaders/op_getrows_f16.comp
@@ -833,6 +834,7 @@ if (GGML_KOMPUTE)
shaderop_mul_mat_q8_0.h
shaderop_mul_mat_q4_0.h
shaderop_mul_mat_q4_1.h
shaderop_mul_mat_q4_k.h
shaderop_mul_mat_q6_k.h
shaderop_getrows_f32.h
shaderop_getrows_f16.h

View File

@@ -562,6 +562,10 @@ void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * na
#include "ggml-cann.h"
#endif
#ifdef GGML_USE_KOMPUTE
#include "ggml-kompute.h"
#endif
struct ggml_backend_registry {
std::vector<ggml_backend_reg_t> backends;
std::vector<ggml_backend_dev_t> devices;
@@ -591,8 +595,9 @@ struct ggml_backend_registry {
#ifdef GGML_USE_AMX
register_backend(ggml_backend_amx_reg());
#endif
// TODO: kompute
#ifdef GGML_USE_KOMPUTE
register_backend(ggml_backend_kompute_reg());
#endif
register_backend(ggml_backend_cpu_reg());
}
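With the #ifdef above in place, Kompute devices surface through the generic registry alongside the other backends. A minimal enumeration sketch (assuming the public device-enumeration entry points declared in ggml-backend.h at this revision):

#include <cstdio>
#include "ggml-backend.h"

int main() {
    // Walk every device exposed by the registered backends (CPU, CUDA, Kompute, ...).
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        printf("device %zu: %s - %s\n", i,
               ggml_backend_dev_name(dev),
               ggml_backend_dev_description(dev));
    }
    return 0;
}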

View File

@@ -20,6 +20,7 @@
#include "shaderop_mul_mat_q8_0.h"
#include "shaderop_mul_mat_q4_0.h"
#include "shaderop_mul_mat_q4_1.h"
#include "shaderop_mul_mat_q4_k.h"
#include "shaderop_mul_mat_q6_k.h"
#include "shaderop_mul_mat_mat_f32.h"
#include "shaderop_getrows_f32.h"
@@ -42,6 +43,7 @@
#include <cstring>
#include <iostream>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <string>
#include <unordered_map>
@@ -273,18 +275,9 @@ static std::vector<ggml_vk_device> ggml_vk_available_devices_internal(size_t mem
return results;
}
// public API returns a C-style array
ggml_vk_device * ggml_vk_available_devices(size_t memoryRequired, size_t * count) {
auto devices = ggml_vk_available_devices_internal(memoryRequired);
*count = devices.size();
if (devices.empty()) {
return nullptr;
}
size_t nbytes = sizeof (ggml_vk_device) * (devices.size());
auto * arr = static_cast<ggml_vk_device *>(malloc(nbytes));
memcpy(arr, devices.data(), nbytes);
return arr;
static std::vector<ggml_vk_device>& ggml_vk_available_devices() {
static std::vector<ggml_vk_device> devices = ggml_vk_available_devices_internal(0);
return devices;
}
static void ggml_vk_filterByVendor(std::vector<ggml_vk_device>& devices, const std::string& targetVendor) {
@@ -341,7 +334,7 @@ ggml_vk_device ggml_vk_current_device() {
if (!komputeManager()->hasDevice())
return ggml_vk_device();
auto devices = ggml_vk_available_devices_internal(0);
auto devices = ggml_vk_available_devices();
ggml_vk_filterByName(devices, komputeManager()->physicalDevice()->getProperties().deviceName.data());
GGML_ASSERT(!devices.empty());
return devices.front();
@@ -1075,6 +1068,40 @@ static void ggml_vk_mul_mat_q8_0(Args&&... args) {
ggml_vk_mul_mat_impl(spirv, "q8_0", 1/*We access blocks unaligned*/, std::forward<Args>(args)...);
}
static void ggml_vk_mul_mat_q4_k(
kp::Sequence& seq,
const std::shared_ptr<kp::Tensor>& inA,
const std::shared_ptr<kp::Tensor>& inB,
const std::shared_ptr<kp::Tensor>& out,
uint32_t inAOff, uint32_t inBOff, uint32_t outOff,
int32_t ne00, int32_t ne01, int32_t ne02, int32_t ne10,
int32_t ne11, int32_t ne12, int32_t ne13, int32_t ne0,
int32_t ne1, int32_t r2, int32_t r3
) {
const static auto spirv = getSpirvShader(kp::shader_data::op_mul_mat_q4_k_comp_spv,
kp::shader_data::op_mul_mat_q4_k_comp_spv_len);
struct PushConstants {
uint32_t inAOff, inBOff, outOff;
int32_t ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3;
} pushConsts {
0, 0, 0,
ne00, ne10, ne0, ne1, ne01, ne02, ne12, r2, r3
};
std::shared_ptr<kp::Algorithm> s_algo = nullptr;
if (!komputeManager()->hasAlgorithm(__func__)) {
s_algo = komputeManager()->algorithm<uint32_t, PushConstants>(__func__, s_kompute_context->pool.get(), {inA, inB, out}, spirv, {unsigned((ne01 + 3)/4), unsigned(ne11), unsigned(ne12) * unsigned(ne13)}, {}, {pushConsts});
} else {
s_algo = komputeManager()->getAlgorithm(__func__);
s_algo->setTensors({inA, inB, out});
s_algo->setWorkgroup({unsigned((ne01 + 3)/4), unsigned(ne11), unsigned(ne12) * unsigned(ne13)});
s_algo->setPushConstants<PushConstants>({pushConsts});
s_algo->updateDescriptors(s_kompute_context->pool.get());
}
seq.record<kp::OpAlgoDispatch>(s_algo);
}
static void ggml_vk_mul_mat_q6_k(
kp::Sequence& seq,
const std::shared_ptr<kp::Tensor>& inA,
@@ -1323,17 +1350,7 @@ static void ggml_vk_cpy_f16_f32(Args&&... args) {
ggml_vk_cpy(spirv, 2, 4, std::forward<Args>(args)...);
}
static bool ggml_vk_supports_op(const struct ggml_tensor * op) {
switch (op->type) {
case GGML_TYPE_F16:
case GGML_TYPE_F32:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
break;
default:
return false;
}
static bool ggml_backend_kompute_device_supports_op(ggml_backend_dev_t dev, const struct ggml_tensor * op) {
switch (op->op) {
case GGML_OP_UNARY:
switch (ggml_get_unary_op(op)) {
@@ -1402,6 +1419,7 @@ static bool ggml_vk_supports_op(const struct ggml_tensor * op) {
case GGML_TYPE_Q8_0:
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
case GGML_TYPE_Q4_K:
return true;
default:
;
@@ -1410,6 +1428,8 @@ static bool ggml_vk_supports_op(const struct ggml_tensor * op) {
;
}
return false;
GGML_UNUSED(dev);
}
static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml_cgraph * gf) {
@@ -1458,11 +1478,6 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
any_commands_recorded = true;
if (!ggml_vk_supports_op(dst)) {
fprintf(stderr, "%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
GGML_ABORT("unsupported op");
}
const int32_t ne00 = src0 ? src0->ne[0] : 0;
const int32_t ne01 = src0 ? src0->ne[1] : 0;
const int32_t ne02 = src0 ? src0->ne[2] : 0;
@@ -1656,6 +1671,12 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, r2, r3
);
break;
case GGML_TYPE_Q4_K:
ggml_vk_mul_mat_q4_k(
seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst,
ne00, ne01, ne02, ne10, ne11, ne12, ne13, ne0, ne1, ne12/ne02, ne13/ne03
);
break;
case GGML_TYPE_Q6_K:
ggml_vk_mul_mat_q6_k(
seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst,
@@ -1907,25 +1928,31 @@ static ggml_backend_buffer_type_i ggml_backend_kompute_buffer_type_interface = {
};
ggml_backend_buffer_type_t ggml_backend_kompute_buffer_type(int device) {
static std::vector<ggml_backend_buffer_type> bufts = []() {
std::vector<ggml_backend_buffer_type> vec;
auto devices = ggml_vk_available_devices_internal(0);
vec.reserve(devices.size());
static std::mutex mutex;
std::lock_guard<std::mutex> lock(mutex);
for (const auto & dev : devices) {
vec.push_back({
/* .iface = */ ggml_backend_kompute_buffer_type_interface,
/* .device = */ nullptr,
/* .context = */ new ggml_backend_kompute_buffer_type_context(dev.index, dev.bufferAlignment, dev.maxAlloc)
});
auto devices = ggml_vk_available_devices();
int32_t device_count = (int32_t) devices.size();
GGML_ASSERT(device < device_count);
GGML_ASSERT(devices.size() <= GGML_KOMPUTE_MAX_DEVICES);
static ggml_backend_buffer_type
ggml_backend_kompute_buffer_types[GGML_KOMPUTE_MAX_DEVICES];
static bool ggml_backend_kompute_buffer_type_initialized = false;
if (!ggml_backend_kompute_buffer_type_initialized) {
for (int32_t i = 0; i < device_count; i++) {
ggml_backend_kompute_buffer_types[i] = {
/* .iface = */ ggml_backend_kompute_buffer_type_interface,
/* .device = */ ggml_backend_reg_dev_get(ggml_backend_kompute_reg(), i),
/* .context = */ new ggml_backend_kompute_buffer_type_context{ i, devices[i].bufferAlignment, devices[i].maxAlloc },
};
}
return vec;
}();
ggml_backend_kompute_buffer_type_initialized = true;
}
auto it = std::find_if(bufts.begin(), bufts.end(), [device](const ggml_backend_buffer_type & t) {
return device == static_cast<ggml_backend_kompute_buffer_type_context *>(t.context)->device;
});
return it < bufts.end() ? &*it : nullptr;
return &ggml_backend_kompute_buffer_types[device];
}
// backend
@@ -1953,16 +1980,6 @@ static ggml_status ggml_backend_kompute_graph_compute(ggml_backend_t backend, st
return GGML_STATUS_SUCCESS;
}
static bool ggml_backend_kompute_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
GGML_UNUSED(backend);
return ggml_vk_supports_op(op);
}
static bool ggml_backend_kompute_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
GGML_UNUSED(backend);
return buft->iface.get_name == ggml_backend_kompute_buffer_type_get_name;
}
static struct ggml_backend_i kompute_backend_i = {
/* .get_name = */ ggml_backend_kompute_name,
/* .free = */ ggml_backend_kompute_free,
@@ -1991,7 +2008,7 @@ ggml_backend_t ggml_backend_kompute_init(int device) {
ggml_backend_t kompute_backend = new ggml_backend {
/* .guid = */ ggml_backend_kompute_guid(),
/* .interface = */ kompute_backend_i,
/* .device = */ nullptr,
/* .device = */ ggml_backend_reg_dev_get(ggml_backend_kompute_reg(), device),
/* .context = */ s_kompute_context,
};
@@ -2001,3 +2018,167 @@ ggml_backend_t ggml_backend_kompute_init(int device) {
bool ggml_backend_is_kompute(ggml_backend_t backend) {
return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_kompute_guid());
}
static size_t ggml_backend_kompute_get_device_count() {
auto devices = ggml_vk_available_devices();
return devices.size();
}
static void ggml_backend_kompute_get_device_description(int device, char * description, size_t description_size) {
auto devices = ggml_vk_available_devices();
GGML_ASSERT((size_t) device < devices.size());
snprintf(description, description_size, "%s", devices[device].name);
}
static void ggml_backend_kompute_get_device_memory(int device, size_t * free, size_t * total) {
auto devices = ggml_vk_available_devices();
GGML_ASSERT((size_t) device < devices.size());
*total = devices[device].heapSize;
*free = devices[device].heapSize;
}
//////////////////////////
struct ggml_backend_kompute_device_context {
int device;
std::string name;
std::string description;
};
static const char * ggml_backend_kompute_device_get_name(ggml_backend_dev_t dev) {
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
return ctx->name.c_str();
}
static const char * ggml_backend_kompute_device_get_description(ggml_backend_dev_t dev) {
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
return ctx->description.c_str();
}
static void ggml_backend_kompute_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
ggml_backend_kompute_get_device_memory(ctx->device, free, total);
}
static ggml_backend_buffer_type_t ggml_backend_kompute_device_get_buffer_type(ggml_backend_dev_t dev) {
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
return ggml_backend_kompute_buffer_type(ctx->device);
}
static bool ggml_backend_kompute_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
if (buft->iface.get_name != ggml_backend_kompute_buffer_type_get_name) {
return false;
}
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
ggml_backend_kompute_buffer_type_context * buft_ctx = (ggml_backend_kompute_buffer_type_context *)buft->context;
return buft_ctx->device == ctx->device;
}
static enum ggml_backend_dev_type ggml_backend_kompute_device_get_type(ggml_backend_dev_t dev) {
GGML_UNUSED(dev);
return GGML_BACKEND_DEVICE_TYPE_GPU;
}
static void ggml_backend_kompute_device_get_props(ggml_backend_dev_t dev, struct ggml_backend_dev_props * props) {
props->name = ggml_backend_kompute_device_get_name(dev);
props->description = ggml_backend_kompute_device_get_description(dev);
props->type = ggml_backend_kompute_device_get_type(dev);
ggml_backend_kompute_device_get_memory(dev, &props->memory_free, &props->memory_total);
props->caps = {
/* .async                = */ false,
/* .host_buffer          = */ false,
/* .buffer_from_host_ptr = */ false,
/* .events               = */ false,
};
}
static ggml_backend_t ggml_backend_kompute_device_init(ggml_backend_dev_t dev, const char * params) {
GGML_UNUSED(params);
ggml_backend_kompute_device_context * ctx = (ggml_backend_kompute_device_context *)dev->context;
return ggml_backend_kompute_init(ctx->device);
}
static bool ggml_backend_kompute_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
const int min_batch_size = 32;
return (op->ne[1] >= min_batch_size && op->op != GGML_OP_GET_ROWS) ||
(op->ne[2] >= min_batch_size && op->op == GGML_OP_MUL_MAT_ID);
GGML_UNUSED(dev);
}
static const struct ggml_backend_device_i ggml_backend_kompute_device_i = {
/* .get_name = */ ggml_backend_kompute_device_get_name,
/* .get_description = */ ggml_backend_kompute_device_get_description,
/* .get_memory = */ ggml_backend_kompute_device_get_memory,
/* .get_type = */ ggml_backend_kompute_device_get_type,
/* .get_props = */ ggml_backend_kompute_device_get_props,
/* .init_backend = */ ggml_backend_kompute_device_init,
/* .get_buffer_type = */ ggml_backend_kompute_device_get_buffer_type,
/* .get_host_buffer_type = */ NULL,
/* .buffer_from_host_ptr = */ NULL,
/* .supports_op = */ ggml_backend_kompute_device_supports_op,
/* .supports_buft = */ ggml_backend_kompute_device_supports_buft,
/* .offload_op = */ ggml_backend_kompute_device_offload_op,
/* .event_new = */ NULL,
/* .event_free = */ NULL,
/* .event_synchronize = */ NULL,
};
static const char * ggml_backend_kompute_reg_get_name(ggml_backend_reg_t reg) {
GGML_UNUSED(reg);
return "Kompute";
}
static size_t ggml_backend_kompute_reg_get_device_count(ggml_backend_reg_t reg) {
GGML_UNUSED(reg);
return ggml_backend_kompute_get_device_count();
}
static ggml_backend_dev_t ggml_backend_kompute_reg_get_device(ggml_backend_reg_t reg, size_t device) {
static std::vector<ggml_backend_dev_t> devices;
static bool initialized = false;
{
static std::mutex mutex;
std::lock_guard<std::mutex> lock(mutex);
if (!initialized) {
for (size_t i = 0; i < ggml_backend_kompute_get_device_count(); i++) {
ggml_backend_kompute_device_context * ctx = new ggml_backend_kompute_device_context;
char desc[256];
ggml_backend_kompute_get_device_description(i, desc, sizeof(desc));
ctx->device = i;
ctx->name = "Kompute" + std::to_string(i);
ctx->description = desc;
devices.push_back(new ggml_backend_device {
/* .iface = */ ggml_backend_kompute_device_i,
/* .reg = */ reg,
/* .context = */ ctx,
});
}
initialized = true;
}
}
GGML_ASSERT(device < devices.size());
return devices[device];
}
static const struct ggml_backend_reg_i ggml_backend_kompute_reg_i = {
/* .get_name = */ ggml_backend_kompute_reg_get_name,
/* .get_device_count = */ ggml_backend_kompute_reg_get_device_count,
/* .get_device = */ ggml_backend_kompute_reg_get_device,
/* .get_proc_address = */ NULL,
};
ggml_backend_reg_t ggml_backend_kompute_reg() {
static ggml_backend_reg reg = {
/* .iface = */ ggml_backend_kompute_reg_i,
/* .context = */ nullptr,
};
return &reg;
}
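Taken together, the registry above lets callers construct a Kompute backend without touching the Kompute-specific API directly. A small usage sketch (an illustration, assuming the generic ggml_backend_dev_init() wrapper in ggml-backend.h):

#include "ggml-backend.h"
#include "ggml-kompute.h"

int main() {
    // Create a backend for Kompute device 0 through the new device interface,
    // rather than calling ggml_backend_kompute_init() directly.
    ggml_backend_reg_t reg = ggml_backend_kompute_reg();
    ggml_backend_dev_t dev = ggml_backend_reg_dev_get(reg, /* index = */ 0);
    ggml_backend_t backend = ggml_backend_dev_init(dev, /* params = */ nullptr);
    if (backend == nullptr) {
        return 1;
    }
    // ... allocate buffers and run graphs as with any other backend ...
    ggml_backend_free(backend);
    return 0;
}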

View File

@@ -22102,18 +22102,46 @@ static size_t gguf_type_size(enum gguf_type type) {
return GGUF_TYPE_SIZE[type];
}
static void gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
GGML_ASSERT(info->n_dims <= GGML_MAX_DIMS);
GGML_ASSERT(0 <= info->type && info->type < GGML_TYPE_COUNT);
static bool gguf_tensor_info_sanitize(struct gguf_tensor_info * info) {
if (info->n_dims > GGML_MAX_DIMS) {
fprintf(stderr, "%s: invalid number of dimensions (%" PRIu32 ")\n", __func__, info->n_dims);
return false;
}
if (info->type < 0 || info->type >= GGML_TYPE_COUNT) {
fprintf(stderr, "%s: invalid type (%d)\n", __func__, info->type);
return false;
}
if (strlen(info->name.data) >= GGML_MAX_NAME) {
fprintf(stderr, "%s: tensor '%s' name is too long\n", __func__, info->name.data);
return false;
}
for (uint32_t i = 0; i < info->n_dims; ++i) {
GGML_ASSERT(info->ne[i] > 0);
if (info->ne[i] <= 0) {
fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[i]);
return false;
}
}
// prevent overflow for total number of elements
GGML_ASSERT(INT64_MAX/info->ne[1] > info->ne[0]);
GGML_ASSERT(INT64_MAX/info->ne[2] > info->ne[0]*info->ne[1]);
GGML_ASSERT(INT64_MAX/info->ne[3] > info->ne[0]*info->ne[1]*info->ne[2]);
if (INT64_MAX/info->ne[1] <= info->ne[0]) {
fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[1]);
return false;
}
if (INT64_MAX/info->ne[2] <= info->ne[0]*info->ne[1]) {
fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[2]);
return false;
}
if (INT64_MAX/info->ne[3] <= info->ne[0]*info->ne[1]*info->ne[2]) {
fprintf(stderr, "%s: invalid number of elements (%" PRIu64 ")\n", __func__, info->ne[3]);
return false;
}
return true;
}
static bool gguf_fread_el(FILE * file, void * dst, size_t size, size_t * offset) {
@@ -22414,8 +22442,7 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p
ok = ok && gguf_fread_el (file, &info->type, sizeof(info->type), &offset);
ok = ok && gguf_fread_el (file, &info->offset, sizeof(info->offset), &offset);
// TODO: return an error instead of crashing with GGML_ASSERT
gguf_tensor_info_sanitize(info);
ok = ok && gguf_tensor_info_sanitize(info);
// make sure there is no duplicated tensor names
for (uint64_t j = 0; j < i && ok; ++j) {
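The practical effect of returning false instead of asserting: a malformed file now fails load gracefully. A minimal caller sketch (assuming the gguf API as declared in ggml.h at this revision):

#include <cstdio>
#include "ggml.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        return 1;
    }
    struct gguf_init_params params = {
        /* .no_alloc = */ true,
        /* .ctx      = */ nullptr,
    };
    // With the sanitization above, a corrupt tensor-info section makes
    // gguf_init_from_file() return NULL instead of aborting the process.
    struct gguf_context * gguf = gguf_init_from_file(argv[1], params);
    if (gguf == nullptr) {
        fprintf(stderr, "failed to load '%s': corrupt or invalid gguf\n", argv[1]);
        return 1;
    }
    gguf_free(gguf);
    return 0;
}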

View File

@@ -15,6 +15,7 @@
#define TWOPI_F 6.283185307179586f
#define QK_K 256
#define K_SCALE_SIZE 12
#define u8BufToU16(buf, idx) (((uint16_t(buf[idx + 1]) << 8)) | buf[idx])
#define u8BufToFloat16(buf, idx) uint16BitsToHalf u8BufToU16(buf, idx)
@@ -64,6 +65,14 @@ mat4 dequantize_q4_1(const block_q4_1 xb, uint il) {
return reg;
}
#define sizeof_block_q4_k 144
struct block_q4_k {
float16_t d;
float16_t dmin;
uint8_t scales[K_SCALE_SIZE];
uint8_t qs[QK_K/2];
};
#define sizeof_block_q6_k 210
struct block_q6_k {
uint8_t ql[QK_K/2]; // quants, lower 4 bits
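For context on the new block_q4_k struct: the 12-byte scales field packs eight 6-bit scales and eight 6-bit mins, which is what the kmask1/kmask2/kmask3 constants in the shader below unpack two groups at a time. A CPU-side sketch of the per-group unpacking (closely following the reference helper in ggml's k-quant code; shown only to document the layout):

#include <cstdint>

// j in 0..7 selects one of the eight (scale, min) pairs of a q4_k block.
static void get_scale_min_k4(int j, const uint8_t * q, uint8_t * d, uint8_t * m) {
    if (j < 4) {
        // groups 0..3: low 6 bits of bytes 0..3 (scales) and 4..7 (mins)
        *d = q[j] & 63;
        *m = q[j + 4] & 63;
    } else {
        // groups 4..7: low nibbles of bytes 8..11, plus the high 2 bits of
        // bytes 0..3 (for scales) and bytes 4..7 (for mins)
        *d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4);
        *m = (q[j + 4] >>  4) | ((q[j]     >> 6) << 4);
    }
}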

View File

@@ -0,0 +1,133 @@
#version 450
#include "common.comp"
#define N_DST 4
#define SIZE_OF_BLOCK sizeof_block_q4_k
layout(local_size_x = 4) in;
layout(local_size_y = 8) in;
layout(local_size_z = 1) in;
layout (binding = 0) readonly buffer tensorInA { block_q4_k inA[]; };
layout (binding = 1) readonly buffer tensorInB { float inB[]; };
layout (binding = 2) writeonly buffer tensorOut { float out_[]; };
layout (push_constant) uniform parameter {
uint inAOff;
uint inBOff;
uint outOff;
int ne00;
int ne10;
int ne0;
int ne1;
int ne01;
int ne02;
int ne12;
int r2;
int r3;
} pcs;
void main() {
const uint16_t kmask1 = uint16_t(0x3f3f);
const uint16_t kmask2 = uint16_t(0x0f0f);
const uint16_t kmask3 = uint16_t(0xc0c0);
const uint ix = gl_SubgroupInvocationID/8; // 0...3
const uint it = gl_SubgroupInvocationID%8; // 0...7
const uint iq = it/4; // 0 or 1
const uint ir = it%4; // 0...3
const uint nb = pcs.ne00/QK_K;
const uint r0 = gl_WorkGroupID.x;
const uint r1 = gl_WorkGroupID.y;
const uint im = gl_WorkGroupID.z;
const uint first_row = r0 * N_DST;
const uint ib_row = first_row * nb;
const uint i12 = im%pcs.ne12;
const uint i13 = im/pcs.ne12;
const uint offset0 = (i12/pcs.r2)*(nb*pcs.ne01) + (i13/pcs.r3)*(nb*pcs.ne01*pcs.ne02);
const uint xblk = ib_row + offset0 + pcs.inAOff;
const uint y = r1*pcs.ne10 + im*pcs.ne00*pcs.ne1 + pcs.inBOff;
float yl[16];
float yh[16];
float sumf[N_DST] = {0.f, 0.f, 0.f, 0.f};
float all_sum = 0.f;
uint y4 = y + ix * QK_K + 64 * iq + 8 * ir;
for (uint ib = ix; ib < nb; ib += 4) {
const uint blk_idx = ib + xblk;
float sumy[4] = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; ++i) {
yl[i+0] = inB[y4+i+ 0]; sumy[0] += yl[i+0];
yl[i+8] = inB[y4+i+ 32]; sumy[1] += yl[i+8];
yh[i+0] = inB[y4+i+128]; sumy[2] += yh[i+0];
yh[i+8] = inB[y4+i+160]; sumy[3] += yh[i+8];
}
for (int row = 0; row < N_DST; row++) {
uint row_idx = row * nb;
uint16_t sc_0 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 0);
uint16_t sc_1 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 2);
uint16_t sc_2 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 4);
uint16_t sc_3 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 6);
uint16_t sc_4 = u8BufToU16(inA[blk_idx + row_idx].scales, iq * 2 + 8);
uint16_t sc16[4];
sc16[0] = sc_0 & kmask1;
sc16[1] = sc_2 & kmask1;
sc16[2] = ((sc_4 >> 0) & kmask2) | ((sc_0 & kmask3) >> 2);
sc16[3] = ((sc_4 >> 4) & kmask2) | ((sc_2 & kmask3) >> 2);
float acc1[4] = {0.f, 0.f, 0.f, 0.f};
float acc2[4] = {0.f, 0.f, 0.f, 0.f};
for (int i = 0; i < 8; i += 2) {
uint16_t q1 = u8BufToU16(inA[blk_idx + row_idx].qs, 32 * iq + 8 * ir + i);
uint16_t q2 = u8BufToU16(inA[blk_idx + row_idx].qs, 64 + 32 * iq + 8 * ir + i);
acc1[0] += yl[i+0] * (q1 & 0x000F);
acc1[1] += yl[i+1] * (q1 & 0x0F00);
acc1[2] += yl[i+8] * (q1 & 0x00F0);
acc1[3] += yl[i+9] * (q1 & 0xF000);
acc2[0] += yh[i+0] * (q2 & 0x000F);
acc2[1] += yh[i+1] * (q2 & 0x0F00);
acc2[2] += yh[i+8] * (q2 & 0x00F0);
acc2[3] += yh[i+9] * (q2 & 0xF000);
}
uint8_t sc8_0 = uint8_t(sc16[0] & 0xFF);
uint8_t sc8_1 = uint8_t(sc16[0] >> 8 );
uint8_t sc8_2 = uint8_t(sc16[1] & 0xFF);
uint8_t sc8_3 = uint8_t(sc16[1] >> 8 );
uint8_t sc8_4 = uint8_t(sc16[2] & 0xFF);
uint8_t sc8_5 = uint8_t(sc16[2] >> 8 );
uint8_t sc8_6 = uint8_t(sc16[3] & 0xFF);
uint8_t sc8_7 = uint8_t(sc16[3] >> 8 );
float dall = float(inA[blk_idx + row_idx].d);
float dmin = float(inA[blk_idx + row_idx].dmin);
sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8_0 +
(acc1[2] + 1.f/256.f * acc1[3]) * sc8_1 * 1.f/16.f +
(acc2[0] + 1.f/256.f * acc2[1]) * sc8_4 +
(acc2[2] + 1.f/256.f * acc2[3]) * sc8_5 * 1.f/16.f) -
dmin * (sumy[0] * sc8_2 + sumy[1] * sc8_3 + sumy[2] * sc8_6 + sumy[3] * sc8_7);
}
y4 += 4 * QK_K;
}
for (int row = 0; row < N_DST; ++row) {
all_sum = subgroupAdd(sumf[row]);
if (subgroupElect()) {
out_[r1*pcs.ne0 + im*pcs.ne0*pcs.ne1 + first_row + row + pcs.outOff] = all_sum;
}
}
}

View File

@@ -4273,8 +4273,11 @@ struct llama_model_loader {
llama_tensor_weight(const llama_file * file, uint16_t idx, const char * name, const struct gguf_context * gguf_ctx, ggml_tensor * tensor) : idx(idx), tensor(tensor) {
const int tensor_idx = gguf_find_tensor(gguf_ctx, name);
offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
if (tensor_idx < 0) {
throw std::runtime_error(format("tensor '%s' not found in the model", name));
}
offs = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, tensor_idx);
if (offs + ggml_nbytes(tensor) < offs || offs + ggml_nbytes(tensor) > file->size) {
throw std::runtime_error(format("tensor '%s' data is not within the file bounds, model is corrupted or incomplete", name));
}
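The condition above folds two failure modes into one check: unsigned wraparound of offs + nbytes, and a range that runs past the end of the file. An equivalent standalone sketch (a hypothetical helper, not part of the diff):

#include <cstdint>

static bool tensor_range_within_file(uint64_t offs, uint64_t nbytes, uint64_t file_size) {
    // offs + nbytes < offs can only happen on unsigned overflow;
    // the second clause rejects ranges extending past end-of-file.
    return offs + nbytes >= offs && offs + nbytes <= file_size;
}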
@@ -7426,7 +7429,7 @@ static bool llm_load_tensors(
if (flags & llama_model_loader::TENSOR_NOT_REQUIRED) {
return nullptr;
}
throw std::runtime_error(format("missing tensor %s", tn.str().c_str()));
throw std::runtime_error(format("missing tensor '%s'", tn.str().c_str()));
}
// some models use the token embedding tensor as the output, but since these are used in different layers and with different ops