mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-28 12:24:35 +00:00)
13 lines · 250 B · C++
#include "ggml-threading.h"

#include <mutex>

// Process-wide mutex backing ggml's critical sections.
std::mutex ggml_critical_section_mutex;

void ggml_critical_section_start() {
    ggml_critical_section_mutex.lock();
}

void ggml_critical_section_end(void) {
    ggml_critical_section_mutex.unlock();
}
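
The file exposes a lock/unlock pair around a single process-wide mutex: callers bracket access to shared state between ggml_critical_section_start() and ggml_critical_section_end(). A minimal usage sketch, assuming a caller that wants to guard one-time initialization of shared data (the ensure_tables and init_shared_tables helpers below are hypothetical, not part of ggml):

#include "ggml-threading.h"

// Hypothetical shared state protected by ggml's critical section.
static bool tables_initialized = false;

static void init_shared_tables() {
    // ... fill lookup tables, allocate shared buffers, etc. ...
    tables_initialized = true;
}

void ensure_tables() {
    ggml_critical_section_start();   // locks ggml_critical_section_mutex
    if (!tables_initialized) {
        init_shared_tables();        // runs at most once across all threads
    }
    ggml_critical_section_end();     // unlocks the mutex
}

Because both functions operate on the one global mutex, any two code paths bracketed this way are mutually exclusive with each other, not just with concurrent calls to the same function.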