// mirror of https://github.com/ggerganov/llama.cpp.git
// synced 2024-11-15 07:19:53 +00:00, commit 2948768e25 (ggml-ci)
#include "log.h"

#include <cstdlib>
#include <random>
#include <thread>
int main() {
|
|
const int n_thread = 8;
|
|
const int n_msg = 1000;
|
|
|
|
std::thread threads[n_thread];
|
|
for (int i = 0; i < n_thread; i++) {
|
|
threads[i] = std::thread([i, n_msg]() {
|
|
for (int j = 0; j < n_msg; j++) {
|
|
const int log_type = std::rand() % 4;
|
|
|
|
switch (log_type) {
|
|
case 0: LOG_INF("Thread %d: %d\n", i, j); break;
|
|
case 1: LOG_WRN("Thread %d: %d\n", i, j); break;
|
|
case 2: LOG_ERR("Thread %d: %d\n", i, j); break;
|
|
case 3: LOG_DBG("Thread %d: %d\n", i, j); break;
|
|
default:
|
|
break;
|
|
}
|
|
|
|
if (rand () % 10 < 5) {
|
|
gpt_log_set_timestamps(gpt_log_main(), rand() % 2);
|
|
}
|
|
}
|
|
});
|
|
}
|
|
|
|
for (int i = 0; i < n_thread; i++) {
|
|
threads[i].join();
|
|
}
|
|
|
|
return 0;
|
|
}
|