Mirror of https://github.com/ggerganov/llama.cpp.git
Commit ea753ede90: use %ld instead of %lld
Parent: 6e27406352
@@ -116,8 +116,8 @@ int main(int argc, char ** argv) {
     for (const auto & prompt : prompts) {
         auto inp = ::llama_tokenize(ctx, prompt, true, false);
         if (inp.size() > n_batch) {
-            fprintf(stderr, "%s: error: number of tokens in input line (%lld) exceeds batch size (%lld), increase batch size and re-run\n",
-                    __func__, (long long int) inp.size(), (long long int) n_batch);
+            fprintf(stderr, "%s: error: number of tokens in input line (%ld) exceeds batch size (%ld), increase batch size and re-run\n",
+                    __func__, inp.size(), n_batch);
             return 1;
         }
         inputs.push_back(inp);
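As background on the format-specifier change above (an illustrative sketch, not part of this commit): printf-style specifiers must match the argument types exactly, otherwise the behavior is undefined. std::vector::size() returns size_t, whose portable specifier is %zu; %lld matches long long and %ld matches long, so passing a size_t to either without a cast is only safe where the sizes happen to line up. The variable names below are hypothetical stand-ins, not taken from the llama.cpp source.

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> tokens = {1, 2, 3};   // stand-in for a tokenized prompt
    long n_batch = 512;                    // hypothetical batch size

    // size() returns size_t: %zu matches it portably, no cast needed.
    std::printf("tokens: %zu\n", tokens.size());

    // When reusing one specifier for mixed types, cast explicitly so each
    // argument really has the type the specifier promises.
    std::printf("tokens (%ld) vs batch size (%ld)\n",
                (long) tokens.size(), n_batch);
    return 0;
}

On LP64 platforms (64-bit Linux and macOS) size_t and long are both 64 bits, which is why %ld works there, while on LLP64 Windows long is 32 bits; that platform difference is the usual reason such specifier changes get revisited.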