Mirror of https://github.com/ggerganov/llama.cpp.git
llama : fix MPI build
ggml-ci
commit a4ad2bf35c
parent 5d2656d670
@@ -4419,7 +4419,7 @@ struct llama_context * llama_new_context_with_model(
     if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
         // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
-        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos());
+        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
         while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
         llama_backend_free();
         exit(1);
     }
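Why the build broke: the hunk shows that llama_token_bos now takes the context as an argument (presumably so the BOS token id can come from the loaded model rather than a fixed constant), and this MPI-path call site was still using the old zero-argument form, which no longer compiled. For context, a minimal sketch of the rank>0 pattern this hunk sits inside, assuming the #ifdef GGML_USE_MPI scaffolding and ggml_mpi_init call that llama.cpp used around this time (those surrounding lines are reconstructed, not shown on this page):

#ifdef GGML_USE_MPI
    ctx->ctx_mpi = ggml_mpi_init();

    if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
        // Worker ranks never return to the caller: they sit in a blocking
        // llama_eval loop on dummy BOS input while rank 0 drives the real
        // computation over MPI, then tear down and exit.
        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
        llama_backend_free();
        exit(1);
    }
#endif

Calling exit(1) here instead of returning ensures that only rank 0 ever executes the caller's code path after llama_new_context_with_model.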