mirror of
https://github.com/ggerganov/llama.cpp.git
synced 2024-09-22 21:16:20 +00:00
Compare commits
8 Commits
dc329fa1d9
...
7c5aff0071
Author | SHA1 | Date | |
---|---|---|---|
|
7c5aff0071 | ||
|
8db003a19d | ||
|
0996c5597f | ||
|
5bb2c5dbd2 | ||
|
67155ab7f5 | ||
|
5af118efda | ||
|
951f1d9053 | ||
|
dc0625ab8f |
@ -941,11 +941,37 @@ struct ggml_threadpool_params ggml_threadpool_params_from_cpu_params(const cpu_p
|
||||
|
||||
#ifdef LLAMA_USE_CURL
|
||||
|
||||
#define CURL_MAX_RETRY 3
|
||||
#define CURL_RETRY_DELAY_SECONDS 2
|
||||
|
||||
|
||||
// Returns true when `str` begins with `prefix` (an empty prefix always matches).
// Stand-in until C++20's std::string::starts_with is available.
static bool starts_with(const std::string & str, const std::string & prefix) {
    return str.compare(0, prefix.size(), prefix) == 0;
}
|
||||
|
||||
static bool curl_perform_with_retry(const std::string& url, CURL* curl, int max_attempts, int retry_delay_seconds) {
|
||||
int remaining_attempts = max_attempts;
|
||||
|
||||
while (remaining_attempts > 0) {
|
||||
fprintf(stderr, "%s: Trying to download from %s (attempt %d of %d)...\n", __func__ , url.c_str(), max_attempts - remaining_attempts + 1, max_attempts);
|
||||
|
||||
CURLcode res = curl_easy_perform(curl);
|
||||
if (res == CURLE_OK) {
|
||||
return true;
|
||||
}
|
||||
|
||||
int exponential_backoff_delay = std::pow(retry_delay_seconds, max_attempts - remaining_attempts) * 1000;
|
||||
fprintf(stderr, "%s: curl_easy_perform() failed: %s, retrying after %d milliseconds...\n", __func__, curl_easy_strerror(res), exponential_backoff_delay);
|
||||
|
||||
remaining_attempts--;
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(exponential_backoff_delay));
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: curl_easy_perform() failed after %d attempts\n", __func__, max_attempts);
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool llama_download_file(const std::string & url, const std::string & path, const std::string & hf_token) {
|
||||
|
||||
// Initialize libcurl
|
||||
@ -1049,9 +1075,8 @@ static bool llama_download_file(const std::string & url, const std::string & pat
|
||||
curl_easy_setopt(curl.get(), CURLOPT_HEADERFUNCTION, static_cast<CURLOPT_HEADERFUNCTION_PTR>(header_callback));
|
||||
curl_easy_setopt(curl.get(), CURLOPT_HEADERDATA, &headers);
|
||||
|
||||
CURLcode res = curl_easy_perform(curl.get());
|
||||
if (res != CURLE_OK) {
|
||||
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
||||
bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
|
||||
if (!was_perform_successful) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1126,11 +1151,10 @@ static bool llama_download_file(const std::string & url, const std::string & pat
|
||||
};
|
||||
|
||||
// start the download
|
||||
fprintf(stderr, "%s: downloading from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
|
||||
llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
|
||||
auto res = curl_easy_perform(curl.get());
|
||||
if (res != CURLE_OK) {
|
||||
fprintf(stderr, "%s: curl_easy_perform() failed: %s\n", __func__, curl_easy_strerror(res));
|
||||
fprintf(stderr, "%s: trying to download model from %s to %s (server_etag:%s, server_last_modified:%s)...\n", __func__,
|
||||
llama_download_hide_password_in_url(url).c_str(), path.c_str(), headers.etag.c_str(), headers.last_modified.c_str());
|
||||
bool was_perform_successful = curl_perform_with_retry(url, curl.get(), CURL_MAX_RETRY, CURL_RETRY_DELAY_SECONDS);
|
||||
if (!was_perform_successful) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -31,6 +31,7 @@ import re
|
||||
import requests
|
||||
import sys
|
||||
import json
|
||||
import shutil
|
||||
|
||||
from hashlib import sha256
|
||||
from enum import IntEnum, auto
|
||||
@ -125,12 +126,27 @@ def download_model(model):
|
||||
if tokt == TOKENIZER_TYPE.UGM:
|
||||
files.append("spiece.model")
|
||||
|
||||
for file in files:
|
||||
save_path = f"models/tokenizers/{name}/{file}"
|
||||
if os.path.isfile(save_path):
|
||||
logger.info(f"{name}: File {save_path} already exists - skipping")
|
||||
continue
|
||||
download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path)
|
||||
if os.path.isdir(repo):
|
||||
# If repo is a path on the file system, copy the directory
|
||||
for file in files:
|
||||
src_path = os.path.join(repo, file)
|
||||
dst_path = f"models/tokenizers/{name}/{file}"
|
||||
if os.path.isfile(dst_path):
|
||||
logger.info(f"{name}: File {dst_path} already exists - skipping")
|
||||
continue
|
||||
if os.path.isfile(src_path):
|
||||
shutil.copy2(src_path, dst_path)
|
||||
logger.info(f"{name}: Copied {src_path} to {dst_path}")
|
||||
else:
|
||||
logger.warning(f"{name}: Source file {src_path} does not exist")
|
||||
else:
|
||||
# If repo is a URL, download the files
|
||||
for file in files:
|
||||
save_path = f"models/tokenizers/{name}/{file}"
|
||||
if os.path.isfile(save_path):
|
||||
logger.info(f"{name}: File {save_path} already exists - skipping")
|
||||
continue
|
||||
download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path)
|
||||
|
||||
|
||||
for model in models:
|
||||
|
@ -132,6 +132,8 @@ static std::string format(const char * fmt, ...) {
|
||||
#define TN_MVLM_PROJ_BLOCK "mm.model.mb_block.%d.block.%d.%s"
|
||||
#define TN_MVLM_PROJ_PEG "mm.model.peg.%d.%s"
|
||||
#define TN_IMAGE_NEWLINE "model.image_newline"
|
||||
#define TN_SUB_GN "v.sub_gn"
|
||||
#define TN_GLB_GN "v.glb_gn"
|
||||
|
||||
#define TN_MINICPMV_POS_EMBD_K "resampler.pos_embed_k"
|
||||
#define TN_MINICPMV_QUERY "resampler.query"
|
||||
@ -530,6 +532,9 @@ struct clip_vision_model {
|
||||
struct ggml_tensor * mm_model_ln_kv_b;
|
||||
struct ggml_tensor * mm_model_ln_post_w;
|
||||
struct ggml_tensor * mm_model_ln_post_b;
|
||||
|
||||
struct ggml_tensor * sub_gn;
|
||||
struct ggml_tensor * glb_gn;
|
||||
};
|
||||
|
||||
struct clip_ctx {
|
||||
@ -777,6 +782,138 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
|
||||
|
||||
// print_tensor_info(embeddings, "embeddings");
|
||||
|
||||
// phi-3.5-vision-instruct
|
||||
if (model.sub_gn && model.glb_gn) {
|
||||
// Phi3VisionEmbedding.hd_transform()
|
||||
ggml_tensor * x = embeddings;
|
||||
|
||||
int num_images = batch_size;
|
||||
int h_crop = 1, w_crop = 1;
|
||||
|
||||
int C = x->ne[0];
|
||||
int L = x->ne[1];
|
||||
int N = x->ne[2];
|
||||
|
||||
int H = (int)sqrt((float)L);
|
||||
|
||||
GGML_ASSERT(H * H == L);
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
// Phi3ImageEmbedding.reshape_hd_patches_2x2merge()
|
||||
x = ggml_reshape_4d(ctx0, x, N, H, H, C);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 0, 1, 2));
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 2, 3, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 2, H / 2, 2, H / 2 * C * N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 1, 3, 2));
|
||||
x = ggml_reshape_3d(ctx0, x, N * C * (H / 2), (H / 2), 4);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4, H / 2, H / 2, N * C);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4, (H / 2) * (H / 2), C, N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 3, 1, 2));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4 * C, H / 2, H / 2, N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, (H / 2) * 4 * C, (H / 2), w_crop, num_images * h_crop);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4 * C, w_crop * (H / 2), h_crop * (H / 2), num_images);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
ggml_tensor * global_image_features_hd = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
// Phi3ImageEmbedding.add_image_newline()
|
||||
ggml_tensor * newline_embedding = model.sub_gn;
|
||||
for (int i = 0; i < H/2-1; i++) {
|
||||
newline_embedding = ggml_concat(ctx0, newline_embedding, model.sub_gn, 2);
|
||||
}
|
||||
ggml_tensor * global_image_features_hd_newline = ggml_concat(ctx0, global_image_features_hd, newline_embedding, 1);
|
||||
|
||||
global_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, global_image_features_hd_newline, 3, 2, 1, 0));
|
||||
global_image_features_hd_newline = ggml_reshape_4d(ctx0, global_image_features_hd_newline, 1, 1, (w_crop*(H/2)+1) * h_crop*(H/2), 4*C);
|
||||
global_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, global_image_features_hd_newline, 3, 2, 1, 0));
|
||||
|
||||
h_crop = image_size / 336;
|
||||
w_crop = image_size / 336;
|
||||
|
||||
// sub_image_features_hd
|
||||
x = embeddings;
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
// Phi3ImageEmbedding.reshape_hd_patches_2x2merge()
|
||||
x = ggml_reshape_4d(ctx0, x, N, H, H, C);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 0, 1, 2));
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 2, 3, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 2, H / 2, 2, H / 2 * C * N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 1, 3, 2));
|
||||
x = ggml_reshape_3d(ctx0, x, N * C * (H / 2), (H / 2), 4);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4, H / 2, H / 2, N * C);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4, (H / 2) * (H / 2), C, N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 3, 1, 2));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4 * C, H / 2, H / 2, N);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, (H / 2) * 4 * C, (H / 2), w_crop, num_images * h_crop);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 0, 2, 1, 3));
|
||||
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
x = ggml_reshape_4d(ctx0, x, 4 * C, w_crop * (H / 2), h_crop * (H / 2), num_images);
|
||||
x = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
ggml_tensor * sub_image_features_hd = ggml_cont(ctx0, ggml_permute(ctx0, x, 3, 2, 1, 0));
|
||||
|
||||
// Phi3ImageEmbedding.add_image_newline()
|
||||
newline_embedding = model.sub_gn;
|
||||
for (int i = 0; i < (H/2-1); i++) {
|
||||
newline_embedding = ggml_concat(ctx0, newline_embedding, model.sub_gn, 2);
|
||||
}
|
||||
ggml_tensor * sub_image_features_hd_newline = ggml_concat(ctx0, sub_image_features_hd, newline_embedding, 1);
|
||||
|
||||
sub_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, sub_image_features_hd_newline, 3, 2, 1, 0));
|
||||
sub_image_features_hd_newline = ggml_reshape_4d(ctx0, sub_image_features_hd_newline, 1, 1, (w_crop*(H/2)+1) * h_crop*(H/2), 4*C);
|
||||
sub_image_features_hd_newline = ggml_cont(ctx0, ggml_permute(ctx0, sub_image_features_hd_newline, 3, 2, 1, 0));
|
||||
|
||||
embeddings = ggml_concat(ctx0, sub_image_features_hd_newline, model.glb_gn, 1);
|
||||
embeddings = ggml_concat(ctx0, embeddings, global_image_features_hd_newline, 1);
|
||||
}
|
||||
|
||||
// llava projector
|
||||
if (ctx->proj_type == PROJECTOR_TYPE_MLP) {
|
||||
embeddings = ggml_mul_mat(ctx0, model.mm_0_w, embeddings);
|
||||
@ -1402,6 +1539,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
||||
vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
|
||||
// LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__);
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
try {
|
||||
vision_model.sub_gn = get_tensor(new_clip->ctx_data, TN_SUB_GN);
|
||||
vision_model.glb_gn = get_tensor(new_clip->ctx_data, TN_GLB_GN);
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
} else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
|
||||
// MobileVLM projection
|
||||
vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
|
||||
|
@ -18,8 +18,8 @@ struct llava_context {
|
||||
};
|
||||
|
||||
static void show_additional_info(int /*argc*/, char ** argv) {
|
||||
LOG_TEE("\n example usage: %s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
||||
LOG_TEE(" note: a lower temperature value like 0.1 is recommended for better quality.\n");
|
||||
LOG_TEE("\nexample usage:\n\n%s -m <llava-v1.5-7b/ggml-model-q5_k.gguf> --mmproj <llava-v1.5-7b/mmproj-model-f16.gguf> --image <path/to/an/image.jpg> --image <path/to/another/image.jpg> [--temp 0.1] [-p \"describe the image in detail.\"]\n", argv[0]);
|
||||
LOG_TEE("\nnote: a lower temperature value like 0.1 is recommended for better quality.\n");
|
||||
}
|
||||
|
||||
static void llama_log_callback_logTee(ggml_log_level level, const char * text, void * user_data) {
|
||||
@ -255,7 +255,7 @@ int main(int argc, char ** argv) {
|
||||
|
||||
gpt_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON, show_additional_info)) {
|
||||
if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_LLAVA, show_additional_info)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@ -26,7 +26,11 @@ void ggml_cuda_op_mul_mat_q(
|
||||
// nrows_dst == nrows of the matrix that the kernel writes into
|
||||
const int64_t nrows_dst = id == ctx.device ? ne0 : row_diff;
|
||||
|
||||
const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst};
|
||||
// The stream-k decomposition is only faster for recent NVIDIA GPUs.
|
||||
// Also its fixup needs to allocate a temporary buffer in the memory pool.
|
||||
// There are multiple parallel CUDA streams for src1_ncols != ne11 which would introduce a race condition for this buffer.
|
||||
const bool use_stream_k = compute_capability >= CC_VOLTA && compute_capability < CC_OFFSET_AMD && src1_ncols == ne11;
|
||||
const mmq_args args = {src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, stride00, src1_padded_row_size, src1_ncols, ne11, nrows_dst, use_stream_k};
|
||||
|
||||
switch (src0->type) {
|
||||
case GGML_TYPE_Q4_0:
|
||||
|
@ -2742,6 +2742,7 @@ struct mmq_args {
|
||||
int64_t ne00; int64_t ne01; int64_t stride01;
|
||||
int64_t ne10; int64_t ne11; int64_t stride11;
|
||||
int64_t ne0;
|
||||
bool use_stream_k;
|
||||
};
|
||||
|
||||
template<ggml_type type>
|
||||
@ -2777,8 +2778,7 @@ static void launch_mul_mat_q(ggml_backend_cuda_context & ctx, const mmq_args & a
|
||||
const int ntx = (args.ne11 + mmq_x - 1) / mmq_x;
|
||||
const dim3 block_nums_xy_tiling(nty, ntx, 1);
|
||||
|
||||
const bool use_stream_k = cc >= CC_VOLTA && cc < CC_OFFSET_AMD;
|
||||
if (!use_stream_k) {
|
||||
if (!args.use_stream_k) {
|
||||
if (args.ne01 % mmq_y == 0) {
|
||||
constexpr bool need_check = false;
|
||||
mul_mat_q<type, mmq_x, MMQ_NWARPS, need_check><<<block_nums_xy_tiling, block_dims, shmem, stream>>>
|
||||
|
Loading…
Reference in New Issue
Block a user