mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-10 18:51:45 +00:00)
Moving Block_release to the deallocation code
commit 5e6358398c
parent 7403c05c06
@@ -239,8 +239,6 @@ struct ggml_backend_metal_context {
     struct ggml_cgraph * gf;
 
     // the callback given to the thread pool
-    // TODO: ideally, this should be created once, utilizing the command buffer state above
-    //       for some reason, doing it like this leads to a crash
     void (^encode_async)(size_t ith);
 
     // n_cb command buffers + 1 used by the main thread
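For context, void (^encode_async)(size_t ith); declares a block (a Clang/Apple closure) stored as a plain C struct member. A block kept beyond the scope that created it has to be copied to the heap with Block_copy and balanced later with Block_release. Below is a minimal sketch of that pattern, not the real ggml code; my_ctx and my_ctx_set_callback are hypothetical names used only for illustration.

#include <Block.h>    // Block_copy / Block_release from the blocks runtime
#include <stddef.h>   // size_t

// Hypothetical stand-in for ggml_backend_metal_context, for illustration only.
struct my_ctx {
    // a block member, analogous to encode_async above
    void (^callback)(size_t ith);
};

// Hypothetical setter: a block must be copied to the heap with Block_copy if it
// is stored past the scope that created it; the copy is balanced by Block_release.
void my_ctx_set_callback(struct my_ctx * ctx, void (^cb)(size_t ith)) {
    Block_release(ctx->callback);    // no-op while the member is still NULL
    ctx->callback = Block_copy(cb);
}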
@@ -438,7 +436,6 @@ static struct ggml_backend_metal_context * ggml_metal_init(void) {
     ctx->capture_scope = nil;
 
     ctx->gf = nil;
-    Block_release(ctx->encode_async);
     ctx->encode_async = nil;
     for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
         ctx->command_buffers[i] = nil;
@@ -684,6 +681,8 @@ static void ggml_metal_free(struct ggml_backend_metal_context * ctx) {
         [ctx->kernels[i].pipeline release];
     }
 
+    Block_release(ctx->encode_async);
+
     [ctx->queue release];
     [ctx->device release];
 
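Taken together, the commit pairs the Block_copy that creates encode_async with a single Block_release in the context's teardown path instead of in (re)initialization; Block_release on a NULL block is a no-op, so the free path stays safe even if the callback was never set. Continuing the hypothetical my_ctx sketch above, the same pattern looks roughly like this:

#include <stdio.h>
#include <stdlib.h>

// Hypothetical teardown, mirroring the Block_release that now lives in
// ggml_metal_free: the block is released once, when the context itself goes away.
void my_ctx_free(struct my_ctx * ctx) {
    Block_release(ctx->callback);    // balances the Block_copy in the setter
    ctx->callback = NULL;
    free(ctx);
}

int main(void) {
    struct my_ctx * ctx = calloc(1, sizeof(*ctx));   // callback starts out NULL
    my_ctx_set_callback(ctx, ^(size_t ith) {
        printf("encode step %zu\n", ith);
    });
    ctx->callback(0);
    my_ctx_free(ctx);    // the release happens here, in the deallocation code
    return 0;
}

On macOS this sketch builds with plain clang; on other platforms it needs -fblocks and the BlocksRuntime library.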