Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-11-11 21:39:52 +00:00
remove candidates_decoded
This commit is contained in:
parent eb9d1fcd7d
commit 3773328080
@@ -7356,8 +7356,6 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c
     const llama_token eos = llama_token_eos(&ctx->model);

-    std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
-    candidates_decoded.reserve(candidates->size);
     std::vector<llama_grammar_candidate> candidates_grammar;
     candidates_grammar.reserve(candidates->size);

@@ -7371,8 +7369,8 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c
         } else if (piece.empty() || piece[0] == 0) {
             candidates->data[i].logit = -INFINITY;
         } else {
-            candidates_decoded.push_back(decode_utf8(piece, grammar->partial_utf8));
-            candidates_grammar.push_back({ i, candidates_decoded.back().first.data(), candidates_decoded.back().second });
+            std::pair<std::vector<uint32_t>, llama_partial_utf8> decoded = decode_utf8(piece, grammar->partial_utf8);
+            candidates_grammar.push_back({ i, decoded.first.data(), decoded.second });
         }
     }

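For readers skimming the first hunk, here is a minimal, self-contained sketch of the ownership pattern that hunk removes. The struct shapes and the decode_utf8_stub helper are assumptions paraphrased for the sketch, not code copied from the tree: each grammar candidate stores only a raw pointer to decoded code points, so the candidates_decoded vector acted as the owner of that storage.

#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Assumed shapes, paraphrased for this sketch.
struct llama_partial_utf8 {
    uint32_t value;    // bits collected so far from an incomplete sequence
    int      n_remain; // continuation bytes still expected
};

struct llama_grammar_candidate {
    size_t               index;        // position in the token candidate array
    const uint32_t     * code_points;  // non-owning view into decoded storage
    llama_partial_utf8   partial_utf8;
};

// Stand-in for decode_utf8(): ASCII-only, returns owning buffer plus state.
static std::pair<std::vector<uint32_t>, llama_partial_utf8>
decode_utf8_stub(const std::string & piece, llama_partial_utf8 state) {
    std::vector<uint32_t> cps(piece.begin(), piece.end());
    cps.push_back(0); // zero terminator expected by the grammar matcher
    return { std::move(cps), state };
}

int main() {
    const std::vector<std::string> pieces = { "he", "llo" };

    // Pattern before this commit: candidates_decoded owns every decoded
    // buffer, so the raw pointers stored in candidates_grammar stay valid
    // for as long as both vectors are alive.
    std::vector<std::pair<std::vector<uint32_t>, llama_partial_utf8>> candidates_decoded;
    candidates_decoded.reserve(pieces.size());
    std::vector<llama_grammar_candidate> candidates_grammar;
    candidates_grammar.reserve(pieces.size());

    llama_partial_utf8 state = { 0, 0 };
    for (size_t i = 0; i < pieces.size(); ++i) {
        candidates_decoded.push_back(decode_utf8_stub(pieces[i], state));
        candidates_grammar.push_back({ i,
                                       candidates_decoded.back().first.data(),
                                       candidates_decoded.back().second });
    }

    for (const auto & cand : candidates_grammar) {
        printf("candidate %zu: first code point U+%04X\n",
               cand.index, (unsigned) cand.code_points[0]);
    }
    return 0;
}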
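And a matching sketch of the pattern after the second hunk, using the same assumed types and stub helper as above: the decoded code points now live in a pair local to the else branch, so this standalone example consumes the candidate inside the same block, while that local buffer is still alive, because code_points is a non-owning pointer into it.

#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Same assumed shapes as in the previous sketch.
struct llama_partial_utf8 { uint32_t value; int n_remain; };

struct llama_grammar_candidate {
    size_t               index;
    const uint32_t     * code_points;  // non-owning view into decoded storage
    llama_partial_utf8   partial_utf8;
};

static std::pair<std::vector<uint32_t>, llama_partial_utf8>
decode_utf8_stub(const std::string & piece, llama_partial_utf8 state) {
    std::vector<uint32_t> cps(piece.begin(), piece.end());
    cps.push_back(0);
    return { std::move(cps), state };
}

int main() {
    const std::vector<std::string> pieces = { "he", "llo" };
    llama_partial_utf8 state = { 0, 0 };

    for (size_t i = 0; i < pieces.size(); ++i) {
        // Pattern after this commit: decode into a block-local pair instead
        // of appending to a long-lived candidates_decoded buffer ...
        std::pair<std::vector<uint32_t>, llama_partial_utf8> decoded =
            decode_utf8_stub(pieces[i], state);
        llama_grammar_candidate cand = { i, decoded.first.data(), decoded.second };

        // ... and use the candidate while `decoded` is still in scope, because
        // cand.code_points points into decoded.first's heap buffer and would
        // dangle once that local pair is destroyed.
        printf("candidate %zu: first code point U+%04X\n",
               cand.index, (unsigned) cand.code_points[0]);
    }
    return 0;
}

Whether that lifetime assumption also holds at the real call site depends on how candidates_grammar is consumed later in llama_sample_grammar, which lies outside the lines shown in these hunks.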