perplexity : fix MSVC build after #5020
This commit is contained in:
parent 381ee19572
commit e15c61635f
@@ -469,7 +469,7 @@ static void compute_logprobs(const float * batch_logits, int n_vocab, std::vecto
     size_t max_threads = std::min((eval_pairs.size() + k_token_chunk - 1)/k_token_chunk, workers.size());
 
     std::atomic<int> counter(0);
-    auto compute = [&counter, &eval_pairs, &eval_results, batch_logits, n_vocab] () {
+    auto compute = [&counter, &eval_pairs, &eval_results, batch_logits, n_vocab, k_token_chunk] () {
         float local_logprobs[k_token_chunk];
         while (true) {
             size_t first = counter.fetch_add(k_token_chunk, std::memory_order_relaxed);
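The change is a single line: k_token_chunk is added to the lambda's explicit capture list. GCC and Clang accept the previous version because a constant with a compile-time value, used only in constant expressions (here, as the bound of local_logprobs), is not odr-used and so needs no capture; MSVC rejects that implicit use when the lambda has no default capture mode, which is why the build broke. The standalone sketch below reproduces the pattern; the names chunk_size, scores, and worker are hypothetical and not taken from perplexity.cpp.

// Minimal sketch of the capture issue this commit works around; names are
// hypothetical, not from perplexity.cpp.
#include <atomic>
#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t chunk_size = 4;   // stands in for k_token_chunk
    std::atomic<int> counter(0);

    // Without naming chunk_size in the capture list, GCC/Clang still compile
    // this lambda: using a constexpr in a constant expression is not an
    // odr-use, so no capture is required (Clang may even warn that the
    // explicit capture is unnecessary). MSVC, however, rejects the implicit
    // use because the lambda has no default capture mode, hence the explicit
    // capture mirroring the diff above.
    auto worker = [&counter, chunk_size]() {
        float scores[chunk_size];            // the array bound is what needs the constant
        std::size_t first = counter.fetch_add((int) chunk_size, std::memory_order_relaxed);
        std::printf("chunk of %zu starting at index %zu\n",
                    sizeof(scores) / sizeof(scores[0]), first);
    };

    worker();
    return 0;
}

Capturing the constant by value is harmless on compilers that do not need it, which makes the explicit capture the usual portable fix for this divergence.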