Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2025-01-12 03:31:46 +00:00).
server: free sampling contexts on exit (#7264)
* server: free sampling contexts on exit. This cleans up the last leak found by the address sanitizer. * fix whitespace * fix whitespace
This commit is contained in:
parent
1265c670fd
commit
4f0263633b
@@ -671,6 +671,13 @@ struct server_context {
             model = nullptr;
         }

+        // Clear any sampling context
+        for (server_slot & slot : slots) {
+            if (slot.ctx_sampling != nullptr) {
+                llama_sampling_free(slot.ctx_sampling);
+            }
+        }
+
         llama_batch_free(batch);
     }

|
Loading…
Reference in New Issue
Block a user