llama : fix MPI build

ggml-ci
Georgi Gerganov 2023-08-18 17:34:27 +03:00
parent 5d2656d670
commit a4ad2bf35c


@@ -4419,7 +4419,7 @@ struct llama_context * llama_new_context_with_model(
         if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
             // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
-            const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
+            const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
             while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
             llama_backend_free();
             exit(1);
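
For context, a minimal sketch of the rank>0 worker pattern this hunk touches, assuming the llama.cpp C API used in the diff above (llama_token_bos now takes the context so the BOS token id comes from the loaded model); the helper name mpi_worker_loop is hypothetical and not part of the commit:

// Sketch only: non-zero MPI ranks feed dummy BOS tokens into llama_eval and
// block there while rank 0 drives the actual computation.
#include <cstdlib>
#include <vector>
#include "llama.h"

static void mpi_worker_loop(struct llama_context * ctx, int n_ctx) {
    // Dummy input: n_ctx copies of the BOS token (read via the context after the API change).
    const std::vector<llama_token> tmp(n_ctx, llama_token_bos(ctx));

    // Stay inside llama_eval until it returns non-zero, then shut this rank down.
    while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {}

    llama_backend_free();
    exit(1);
}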