From 344f9126cc0d15891fde9472fe40b8572628ad7d Mon Sep 17 00:00:00 2001
From: slaren
Date: Wed, 15 May 2024 15:08:48 +0200
Subject: [PATCH] ggml : tag ggml_tensor::backend as deprecated (#7290)

---
 examples/llava/llava.cpp | 15 ---------------
 ggml-backend.c           |  1 -
 ggml.c                   | 10 ++++++++++
 ggml.h                   |  3 ++-
 4 files changed, 12 insertions(+), 17 deletions(-)

diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp
index 9a990bb18..63878d176 100644
--- a/examples/llava/llava.cpp
+++ b/examples/llava/llava.cpp
@@ -88,7 +88,6 @@ static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair<
 // Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out)
 static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *> & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) {
     struct {
-        struct ggml_tensor * newline;
         struct ggml_context * ctx;
     } model;
 
@@ -150,20 +149,6 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector
     model.ctx = ggml_init(params);
 
-    ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
-    model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
-    if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) {
-        if (newline_tmp->buffer == NULL) {
-            LOG_TEE("newline_tmp tensor buffer is NULL\n");
-        }
-        ggml_backend_tensor_get(newline_tmp, model.newline->data, 0, ggml_nbytes(newline_tmp));
-    } else {
-        model.newline->data = newline_tmp->data;
-        if (model.newline->data == NULL) {
-            LOG_TEE("newline_tmp tensor data is NULL\n");
-        }
-    }
-
     struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4
     // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false);
     // fill it with the image embeddings, ignoring the base
diff --git a/ggml-backend.c b/ggml-backend.c
index dd090a583..9e35ce98d 100644
--- a/ggml-backend.c
+++ b/ggml-backend.c
@@ -1895,7 +1895,6 @@ void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * t
 
     tensor->buffer = buffer;
     tensor->data = (char *)tensor->view_src->data + tensor->view_offs;
-    tensor->backend = tensor->view_src->backend;
     ggml_backend_buffer_init_tensor(buffer, tensor);
 }
 
diff --git a/ggml.c b/ggml.c
index f09cc3060..67e17a210 100644
--- a/ggml.c
+++ b/ggml.c
@@ -3178,6 +3178,12 @@ static struct ggml_tensor * ggml_new_tensor_impl(
 
     struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs);
 
+#ifdef __clang__
+    // temporary until ggml_tensor::backend is removed
+    #pragma clang diagnostic push
+    #pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
     *result = (struct ggml_tensor) {
         /*.type    =*/ type,
         /*.backend =*/ GGML_BACKEND_TYPE_CPU,
@@ -3200,6 +3206,10 @@ static struct ggml_tensor * ggml_new_tensor_impl(
         /*.padding =*/ { 0 },
     };
 
+#ifdef __clang__
+    #pragma clang diagnostic pop
+#endif
+
     // TODO: this should not be needed as long as we don't rely on aligned SIMD loads
     //ggml_assert_aligned(result->data);
 
diff --git a/ggml.h b/ggml.h
index 5e121604a..8c13f4ba8 100644
--- a/ggml.h
+++ b/ggml.h
@@ -565,7 +565,8 @@ extern "C" {
     // n-dimensional tensor
     struct ggml_tensor {
         enum ggml_type type;
-        enum ggml_backend_type backend;
+
+        GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor");
 
         struct ggml_backend_buffer * buffer;
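
Note (not part of the patch): the deprecation message points callers at the tensor's buffer instead of the old backend field. Below is a minimal sketch of how code like the removed llava.cpp block can migrate, assuming only the existing ggml-backend.h helpers ggml_backend_buffer_is_host(), ggml_backend_tensor_get() and ggml_nbytes(); the helper name copy_tensor_data is illustrative.

    // migration sketch: decide host vs. device storage from the buffer, not tensor->backend
    #include "ggml.h"
    #include "ggml-backend.h"
    #include <string.h>

    static void copy_tensor_data(const struct ggml_tensor * src, void * dst) {
        // old pattern: if (src->backend != GGML_BACKEND_TYPE_CPU) { ... }
        // new pattern: ask the buffer where the tensor actually lives
        if (src->buffer != NULL && !ggml_backend_buffer_is_host(src->buffer)) {
            // device (e.g. GPU) storage: copy through the backend API
            ggml_backend_tensor_get(src, dst, 0, ggml_nbytes(src));
        } else {
            // host storage: the data pointer can be read directly
            memcpy(dst, src->data, ggml_nbytes(src));
        }
    }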