llama : disable MPI for now
ggml-ci
ggerganov committed Sep 20, 2023
1 parent e04dc51 commit 5420696
Showing 1 changed file with 6 additions and 3 deletions.
llama.cpp: 6 additions & 3 deletions
@@ -4072,7 +4072,8 @@ static int llama_decode_internal(
 
 #ifdef GGML_USE_MPI
     // TODO: needs fix after #3228
-    ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
+    GGML_ASSERT(false && "not implemented");
+    //ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
 #endif
 
     GGML_ASSERT(n_threads > 0);
@@ -6846,8 +6847,10 @@ struct llama_context * llama_new_context_with_model(
 
     if (ggml_mpi_rank(ctx->ctx_mpi) > 0) {
         // Enter a blocking eval loop with dummy input, letting rank=0 drive the process
-        const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
-        while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
+        // TODO: needs fix after #3228
+        GGML_ASSERT(false && "not implemented");
+        //const std::vector<llama_token> tmp(ctx->model.hparams.n_ctx, llama_token_bos(ctx));
+        //while (!llama_eval(ctx, tmp.data(), tmp.size(), 0, 0)) {};
         llama_backend_free();
         exit(1);
     }
