Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-12-25 02:44:36 +00:00)
llama : remove all_pos_0, all_pos_1, all_seq_id from llama_batch (#9745)
Some checks failed
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full-cuda.Dockerfile platforms:linux/amd64 tag:full-cuda]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full-musa.Dockerfile platforms:linux/amd64 tag:full-musa]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/full.Dockerfile platforms:linux/amd64,linux/arm64 tag:full]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-cuda.Dockerfile platforms:linux/amd64 tag:light-cuda]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-intel.Dockerfile platforms:linux/amd64 tag:light-intel]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli-musa.Dockerfile platforms:linux/amd64 tag:light-musa]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-cli.Dockerfile platforms:linux/amd64,linux/arm64 tag:light]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-cuda.Dockerfile platforms:linux/amd64 tag:server-cuda]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-intel.Dockerfile platforms:linux/amd64 tag:server-intel]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server-musa.Dockerfile platforms:linux/amd64 tag:server-musa]) (push) Has been cancelled
Publish Docker image / Push Docker image to Docker Hub (map[dockerfile:.devops/llama-server.Dockerfile platforms:linux/amd64,linux/arm64 tag:server]) (push) Has been cancelled
Nix CI / nix-eval (macos-latest) (push) Has been cancelled
Nix CI / nix-eval (ubuntu-latest) (push) Has been cancelled
Nix CI / nix-build (macos-latest) (push) Has been cancelled
Nix CI / nix-build (ubuntu-latest) (push) Has been cancelled
flake8 Lint / Lint (push) Has been cancelled
update-flake-lock / lockfile (push) Has been cancelled
* refactor llama_batch_get_one
* adapt all examples
* fix simple.cpp
* fix llama_bench
* fix
* fix context shifting
* free batch before return
* use common_batch_add, reuse llama_batch in loop
* null terminated seq_id list
* fix save-load-state example
* fix perplexity
* correct token pos in llama_batch_allocr
Parent: afd9909a64
Commit: cda0e4b648
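Before the per-file hunks, here is a minimal caller-side sketch of the migration described above. The two-argument form of llama_batch_get_one is taken from the diff below; the model/context setup is assumed and elided.

    #include "llama.h"
    #include <vector>

    // Evaluate a prompt with the transition helper.
    // Before this commit the call was:
    //     llama_batch_get_one(tokens.data(), tokens.size(), /*pos_0=*/ n_past, /*seq_id=*/ 0)
    // After this commit only the token pointer and count remain; positions are
    // tracked automatically by llama_decode and the sequence id defaults to 0.
    static bool eval_prompt(llama_context * ctx, std::vector<llama_token> & tokens) {
        return llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size())) == 0;
    }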
@@ -955,7 +955,7 @@ struct common_init_result common_init_from_params(common_params & params) {
  }

  if (llama_model_has_encoder(model)) {
- llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
+ llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size()));
  llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
  if (decoder_start_token_id == -1) {
  decoder_start_token_id = bos;
@@ -964,7 +964,7 @@ struct common_init_result common_init_from_params(common_params & params) {
  tmp.push_back(decoder_start_token_id);
  }
  if (llama_model_has_decoder(model)) {
- llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
+ llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
  }
  llama_kv_cache_clear(lctx);
  llama_synchronize(lctx);
@@ -74,7 +74,6 @@ int main(int argc, char ** argv) {
  batch.n_seq_id + i,
  batch.seq_id + i,
  batch.logits + i,
- 0, 0, 0, // unused
  };

  const int ret = llama_decode(ctx, batch_view);
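For context, the batch_view initializer in this hunk (and in the later server/perplexity hunks) now stops at the logits pointer. A hypothetical sketch of the resulting aggregate, with the leading fields filled in from the llama_batch definition further down in this commit; batch, i and n_tokens are assumed to come from the surrounding loop:

    llama_batch batch_view = {
        n_tokens,
        batch.token    + i,
        nullptr,            // embd
        batch.pos      + i,
        batch.n_seq_id + i,
        batch.seq_id   + i,
        batch.logits   + i, // no trailing all_pos_0 / all_pos_1 / all_seq_id anymore
    };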
@@ -339,7 +339,7 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {

  static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
  llama_kv_cache_clear(ctx);
- if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
+ if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
  fprintf(stderr, "%s : failed to eval\n", __func__);
  return false;
  }
@@ -131,7 +131,7 @@ static bool run(llama_context * ctx, const common_params & params) {

  std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);

- if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
+ if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
  LOG_ERR("%s : failed to eval\n", __func__);
  return false;
  }
@@ -496,6 +496,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
  // clear the KV cache
  llama_kv_cache_clear(ctx);

+ llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
  for (int j = 0; j < num_batches; ++j) {
  const int batch_start = start + j * n_batch;
  const int batch_size = std::min(end - batch_start, n_batch);
@@ -508,9 +510,14 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
  tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
  }

- // TODO: use batch.logits to save computations instead of relying on logits_all == true
- if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
+ common_batch_clear(batch);
+ for (int i = 0; i < batch_size; i++) {
+ common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
+ }
+
+ if (llama_decode(ctx, batch)) {
  LOG_ERR("%s : failed to eval\n", __func__);
+ llama_batch_free(batch);
  return false;
  }

@@ -523,6 +530,8 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
  }
  }

+ llama_batch_free(batch);
+
  const auto t_end = std::chrono::high_resolution_clock::now();

  if (i == 0) {
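To summarize the imatrix change (the same pattern appears again in the perplexity and kl_divergence hunks below): the example now owns an explicit batch and requests logits for every token, instead of relying on logits_all. A condensed sketch under those assumptions, with the tokenized input and batch size taken as given:

    #include "common.h"
    #include "llama.h"
    #include <vector>

    // condensed sketch of the new imatrix/perplexity batching pattern
    static bool eval_chunks(llama_context * ctx, const std::vector<llama_token> & tokens, int n_batch) {
        const int num_batches = (int) tokens.size() / n_batch;
        llama_batch batch = llama_batch_init(n_batch, 0, 1); // token batch, 1 seq id per token
        for (int j = 0; j < num_batches; ++j) {
            common_batch_clear(batch);
            for (int i = 0; i < n_batch; i++) {
                // explicit position and sequence id 0; logits requested for every token
                common_batch_add(batch, tokens[j*n_batch + i], j*n_batch + i, {0}, true);
            }
            if (llama_decode(ctx, batch)) {
                llama_batch_free(batch); // "free batch before return"
                return false;
            }
        }
        llama_batch_free(batch);
        return true;
    }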
@@ -396,7 +396,7 @@ int main(int argc, char ** argv) {

  LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());

- if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
+ if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
  LOG_ERR("%s : failed to eval\n", __func__);
  return 1;
  }
@@ -1428,7 +1428,7 @@ struct sql_printer : public printer {
  }
  };

- static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_batch, int n_threads) {
+ static void test_prompt(llama_context * ctx, int n_prompt, int n_batch, int n_threads) {
  llama_set_n_threads(ctx, n_threads, n_threads);

  const llama_model * model = llama_get_model(ctx);
@@ -1444,14 +1444,14 @@ static void test_prompt(llama_context * ctx, int n_prompt, int n_past, int n_bat
  for (int i = 1; i < n_tokens; i++) {
  tokens[i] = std::rand() % n_vocab;
  }
- llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens, n_past + n_processed, 0));
+ llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens));
  n_processed += n_tokens;
  }

  llama_synchronize(ctx);
  }

- static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads) {
+ static void test_gen(llama_context * ctx, int n_gen, int n_threads) {
  llama_set_n_threads(ctx, n_threads, n_threads);

  const llama_model * model = llama_get_model(ctx);
@@ -1460,7 +1460,7 @@ static void test_gen(llama_context * ctx, int n_gen, int n_past, int n_threads)
  llama_token token = llama_add_bos_token(model) ? llama_token_bos(model) : std::rand() % n_vocab;

  for (int i = 0; i < n_gen; i++) {
- llama_decode(ctx, llama_batch_get_one(&token, 1, n_past + i, 0));
+ llama_decode(ctx, llama_batch_get_one(&token, 1));
  llama_synchronize(ctx);
  token = std::rand() % n_vocab;
  }
@@ -1596,13 +1596,13 @@ int main(int argc, char ** argv) {
  fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count);
  }
  //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
- test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
+ test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
  }
  if (t.n_gen > 0) {
  if (params.progress) {
  fprintf(stderr, "llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count);
  }
- test_gen(ctx, 1, 0, t.n_threads);
+ test_gen(ctx, 1, t.n_threads);
  }

  for (int i = 0; i < params.reps; i++) {
@@ -1614,13 +1614,13 @@ int main(int argc, char ** argv) {
  if (params.progress) {
  fprintf(stderr, "llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
  }
- test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
+ test_prompt(ctx, t.n_prompt, t.n_batch, t.n_threads);
  }
  if (t.n_gen > 0) {
  if (params.progress) {
  fprintf(stderr, "llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
  }
- test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
+ test_gen(ctx, t.n_gen, t.n_threads);
  }

  uint64_t t_ns = get_time_ns() - t_start;
@@ -283,9 +283,6 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens,
  nullptr,
  nullptr,
  nullptr,
- 0,
- 0,
- 0,
  };

  if (embd) {
@@ -20,7 +20,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
  if (n_eval > n_batch) {
  n_eval = n_batch;
  }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+ if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
  LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
  return false;
  }
@@ -401,6 +401,39 @@ bool llava_image_embed_make_with_clip_img(clip_ctx * ctx_clip, int n_threads, co
  return true;
  }

+ struct llava_embd_batch {
+ std::vector<llama_pos> pos;
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id> seq_id_0;
+ std::vector<llama_seq_id *> seq_ids;
+ std::vector<int8_t> logits;
+ llama_batch batch;
+ llava_embd_batch(float * embd, int32_t n_tokens, llama_pos pos_0, llama_seq_id seq_id) {
+ pos .resize(n_tokens);
+ n_seq_id.resize(n_tokens);
+ seq_ids .resize(n_tokens + 1);
+ logits .resize(n_tokens);
+ seq_id_0.resize(1);
+ seq_id_0[0] = seq_id;
+ seq_ids [n_tokens] = nullptr;
+ batch = {
+ /*n_tokens =*/ n_tokens,
+ /*tokens =*/ nullptr,
+ /*embd =*/ embd,
+ /*pos =*/ pos.data(),
+ /*n_seq_id =*/ n_seq_id.data(),
+ /*seq_id =*/ seq_ids.data(),
+ /*logits =*/ logits.data(),
+ };
+ for (int i = 0; i < n_tokens; i++) {
+ batch.pos [i] = pos_0 + i;
+ batch.n_seq_id[i] = 1;
+ batch.seq_id [i] = seq_id_0.data();
+ batch.logits [i] = false;
+ }
+ }
+ };
+
  bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_embed * image_embed, int n_batch, int * n_past) {
  int n_embd = llama_n_embd(llama_get_model(ctx_llama));
@@ -409,8 +442,9 @@ bool llava_eval_image_embed(llama_context * ctx_llama, const struct llava_image_
  if (n_eval > n_batch) {
  n_eval = n_batch;
  }
- llama_batch batch = {int32_t(n_eval), nullptr, (image_embed->embed+i*n_embd), nullptr, nullptr, nullptr, nullptr, *n_past, 1, 0, };
- if (llama_decode(ctx_llama, batch)) {
+ float * embd = image_embed->embed+i*n_embd;
+ llava_embd_batch llava_batch = llava_embd_batch(embd, n_eval, *n_past, 0);
+ if (llama_decode(ctx_llama, llava_batch.batch)) {
  LOG_ERR("%s : failed to eval\n", __func__);
  return false;
  }
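A note on the new llava_embd_batch helper used above: since llama_batch no longer carries all_pos_0/all_pos_1/all_seq_id, an embedding-only batch has to supply its own pos, n_seq_id, seq_id and logits arrays, and the seq_ids vector is sized n_tokens + 1 so the pointer list stays null terminated, matching the convention introduced in this commit. A minimal usage sketch, with ctx_llama, image_embed, n_embd, n_eval, i and n_past assumed as in llava_eval_image_embed above:

    // feed one chunk of image embeddings; the wrapper owns the per-token arrays
    float * embd = image_embed->embed + i * n_embd;
    llava_embd_batch llava_batch(embd, n_eval, *n_past, /*seq_id=*/ 0);
    if (llama_decode(ctx_llama, llava_batch.batch)) {
        return false;        // eval failed
    }
    *n_past += n_eval;       // advance the position for the next chunk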
@@ -97,7 +97,7 @@ static bool eval_tokens(struct llama_context * ctx_llama, std::vector<llama_toke
  if (n_eval > n_batch) {
  n_eval = n_batch;
  }
- if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval, *n_past, 0))) {
+ if (llama_decode(ctx_llama, llama_batch_get_one(&tokens[i], n_eval))) {
  LOG_ERR("%s : failed to eval. token %d/%d (batch size %d, n_past %d)\n", __func__, i, N, n_batch, *n_past);
  return false;
  }
@@ -89,8 +89,8 @@ int main(int argc, char ** argv) {
  const auto t_enc_start = ggml_time_us();

  // eval the prompt
- llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
- llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
+ llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
+ llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));

  for (int s = 1; s < W + G + 1; ++s) {
  llama_kv_cache_seq_cp(ctx, 0, s, -1, -1);
@@ -89,8 +89,8 @@ int main(int argc, char ** argv){

  const auto t_enc_start = ggml_time_us();

- llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
- llama_decode(ctx, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
+ llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
+ llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));

  const auto t_enc_end = ggml_time_us();
@@ -528,7 +528,7 @@ int main(int argc, char ** argv) {
  int enc_input_size = embd_inp.size();
  llama_token * enc_input_buf = embd_inp.data();

- if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size, 0, 0))) {
+ if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size))) {
  LOG_ERR("%s : failed to eval\n", __func__);
  return 1;
  }
@@ -648,7 +648,7 @@ int main(int argc, char ** argv) {

  LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());

- if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval, n_past, 0))) {
+ if (llama_decode(ctx, llama_batch_get_one(&embd[i], n_eval))) {
  LOG_ERR("%s : failed to eval\n", __func__);
  return 1;
  }
@@ -308,7 +308,6 @@ int main(int argc, char ** argv) {
  batch.n_seq_id + i,
  batch.seq_id + i,
  batch.logits + i,
- 0, 0, 0, // unused
  };

  const int ret = llama_decode(ctx, batch_view);
@@ -408,14 +408,21 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params
  // clear the KV cache
  llama_kv_cache_clear(ctx);

+ llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
  for (int j = 0; j < num_batches; ++j) {
  const int batch_start = start + j * n_batch;
  const int batch_size = std::min(end - batch_start, n_batch);

+ common_batch_clear(batch);
+ for (int i = 0; i < batch_size; i++) {
+ common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
+ }
+
  //LOG_DBG(" Batch %d: starts at %d, size is %d, n_past is %d\n",j,batch_start,batch_size,j * n_batch);
- // TODO: use llama_batch.logits instead of relying on logits_all == true
- if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
+ if (llama_decode(ctx, batch)) {
  //LOG_ERR("%s : failed to eval\n", __func__);
+ llama_batch_free(batch);
  return {tokens, -1, logit_history, prob_history};
  }

@@ -435,6 +442,8 @@ static results_perplexity perplexity_v2(llama_context * ctx, const common_params
  }
  }

+ llama_batch_free(batch);
+
  const auto t_end = std::chrono::high_resolution_clock::now();

  if (i == 0) {
@@ -704,7 +713,6 @@ static bool decode_helper(llama_context * ctx, llama_batch & batch, std::vector<
  batch.n_seq_id + i,
  batch.seq_id + i,
  batch.logits + i,
- 0, 0, 0, // unused
  };

  const int ret = llama_decode(ctx, batch_view);
@@ -1791,6 +1799,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
  // clear the KV cache
  llama_kv_cache_clear(ctx);

+ llama_batch batch = llama_batch_init(n_batch, 0, 1);
+
  for (int j = 0; j < num_batches; ++j) {
  const int batch_start = start + j * n_batch;
  const int batch_size = std::min(end - batch_start, n_batch);
@@ -1803,9 +1813,14 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
  tokens[batch_start] = llama_token_bos(llama_get_model(ctx));
  }

- // TODO: use llama_batch.logits instead of relying on logits_all == true
- if (llama_decode(ctx, llama_batch_get_one(tokens.data() + batch_start, batch_size, j * n_batch, 0))) {
+ common_batch_clear(batch);
+ for (int i = 0; i < batch_size; i++) {
+ common_batch_add(batch, tokens[batch_start + i], j*n_batch + i, {0}, true);
+ }
+
+ if (llama_decode(ctx, batch)) {
  LOG_ERR("%s : failed to eval\n", __func__);
+ llama_batch_free(batch);
  return;
  }

@@ -1818,6 +1833,8 @@ static void kl_divergence(llama_context * ctx, const common_params & params) {
  }
  }

+ llama_batch_free(batch);
+
  const auto t_end = std::chrono::high_resolution_clock::now();

  if (i == 0) {
@@ -48,9 +48,16 @@ int main(int argc, char ** argv) {
  // tokenize prompt
  auto tokens = common_tokenize(ctx, params.prompt, true);

+ // prepare the batch
+ llama_batch batch = llama_batch_init(tokens.size(), 0, 1);
+ for (size_t i = 0; i < tokens.size(); i++) {
+ common_batch_add(batch, tokens[i], i, {0}, false);
+ }
+ batch.logits[batch.n_tokens - 1] = true; // generate next token
+
  // evaluate prompt
- llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), n_past, 0));
- n_past += tokens.size();
+ llama_decode(ctx, batch);
+ n_past += batch.n_tokens;

  // save state (rng, logits, embedding and kv_cache) to file
  {
@@ -77,8 +84,12 @@ int main(int argc, char ** argv) {
  printf("%s", next_token_str.c_str());
  result0 += next_token_str;

- if (llama_decode(ctx, llama_batch_get_one(&next_token, 1, n_past, 0))) {
+ common_batch_clear(batch);
+ common_batch_add(batch, next_token, n_past, {0}, true);
+
+ if (llama_decode(ctx, batch)) {
  fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
+ llama_batch_free(batch);
  llama_free(ctx);
  llama_free_model(model);
  return 1;
@@ -133,8 +144,12 @@ int main(int argc, char ** argv) {
  printf("%s", next_token_str.c_str());
  result1 += next_token_str;

- if (llama_decode(ctx2, llama_batch_get_one(&next_token, 1, n_past, 0))) {
+ common_batch_clear(batch);
+ common_batch_add(batch, next_token, n_past, {0}, true);
+
+ if (llama_decode(ctx2, batch)) {
  fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
+ llama_batch_free(batch);
  llama_free(ctx2);
  llama_free_model(model);
  return 1;
@@ -221,8 +236,12 @@ int main(int argc, char ** argv) {
  printf("%s", next_token_str.c_str());
  result2 += next_token_str;

- if (llama_decode(ctx3, llama_batch_get_one(&next_token, 1, n_past, 1))) {
+ common_batch_clear(batch);
+ common_batch_add(batch, next_token, n_past, {1}, true);
+
+ if (llama_decode(ctx3, batch)) {
  fprintf(stderr, "\n%s : failed to evaluate\n", __func__);
+ llama_batch_free(batch);
  llama_free(ctx3);
  llama_free_model(model);
  return 1;
@@ -236,6 +255,7 @@ int main(int argc, char ** argv) {
  llama_sampler_free(smpl2);
  llama_sampler_free(smpl3);

+ llama_batch_free(batch);
  llama_free(ctx3);
  llama_free_model(model);
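Putting the save-load-state hunks together: the example now allocates one llama_batch up front, refills it with common_batch_clear/common_batch_add inside each generation loop (for ctx, ctx2 and ctx3 alike), frees it on every error path, and frees it once at the end. A condensed sketch of the per-step pattern, with batch, ctx, n_past and next_token assumed from the surrounding loop:

    // reuse the same batch for each sampled token instead of building a new one
    common_batch_clear(batch);
    common_batch_add(batch, next_token, n_past, {0}, true); // request logits for this token
    if (llama_decode(ctx, batch)) {
        llama_batch_free(batch); // free before returning on error
        return 1;
    }
    n_past += 1;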
@@ -2326,7 +2326,6 @@ struct server_context {
  batch.n_seq_id + i,
  batch.seq_id + i,
  batch.logits + i,
- 0, 0, 0, // unused
  };

  const int ret = llama_decode(ctx, batch_view);
@@ -138,7 +138,7 @@ int main(int argc, char ** argv) {

  // prepare a batch for the prompt

- llama_batch batch = llama_batch_get_one(prompt_tokens.data(), prompt_tokens.size(), 0, 0);
+ llama_batch batch = llama_batch_get_one(prompt_tokens.data(), prompt_tokens.size());

  // main loop

@@ -175,7 +175,7 @@ int main(int argc, char ** argv) {
  fflush(stdout);

  // prepare the next batch with the sampled token
- batch = llama_batch_get_one(&new_token_id, 1, n_pos, 0);
+ batch = llama_batch_get_one(&new_token_id, 1);

  n_decode += 1;
  }
@@ -155,9 +155,9 @@ int main(int argc, char ** argv) {
  const auto t_enc_start = ggml_time_us();

  // eval the prompt with both models
- llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
- llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
- llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input, 0, 0));
+ llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1));
+ llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1));
+ llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input));

  const auto t_enc_end = ggml_time_us();
@@ -232,8 +232,11 @@ extern "C" {
  // - token : the token ids of the input (used when embd is NULL)
  // - embd : token embeddings (i.e. float vector of size n_embd) (used when token is NULL)
  // - pos : the positions of the respective token in the sequence
+ // (if set to NULL, the token position will be tracked automatically by llama_decode)
  // - seq_id : the sequence to which the respective token belongs
+ // (if set to NULL, the sequence ID will be assumed to be 0)
  // - logits : if zero, the logits (and/or the embeddings) for the respective token will not be output
+ // (if set to NULL, only the logits for last token will be returned)
  //
  typedef struct llama_batch {
  int32_t n_tokens;
@@ -244,15 +247,6 @@ extern "C" {
  int32_t * n_seq_id;
  llama_seq_id ** seq_id;
  int8_t * logits; // TODO: rename this to "output"
-
- // NOTE: helpers for smooth API transition - can be deprecated in the future
- // for future-proof code, use the above fields instead and ignore everything below
- //
- // pos[i] = all_pos_0 + i*all_pos_1
- //
- llama_pos all_pos_0; // used if pos == NULL
- llama_pos all_pos_1; // used if pos == NULL
- llama_seq_id all_seq_id; // used if seq_id == NULL
  } llama_batch;

  enum llama_model_kv_override_type {
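The updated header comments above are the behavioral core of this commit: a llama_batch with NULL optional fields is now well defined on its own instead of falling back to the removed all_pos_0/all_pos_1/all_seq_id helpers. A hand-built batch illustrating the documented defaults (tokens and n_tokens are assumed to exist; this is essentially what llama_batch_get_one returns, see the implementation hunks further below):

    llama_batch batch = {
        /*n_tokens =*/ (int32_t) n_tokens,
        /*token    =*/ tokens,   // token ids (embd is NULL)
        /*embd     =*/ nullptr,
        /*pos      =*/ nullptr,  // NULL: positions tracked automatically by llama_decode
        /*n_seq_id =*/ nullptr,
        /*seq_id   =*/ nullptr,  // NULL: sequence id assumed to be 0
        /*logits   =*/ nullptr,  // NULL: only the logits for the last token are returned
    };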
@@ -776,15 +770,15 @@ extern "C" {
  // Decoding
  //

- // Return batch for single sequence of tokens starting at pos_0
+ // Return batch for single sequence of tokens
+ // The sequence ID will be fixed to 0
+ // The position of the tokens will be tracked automatically by llama_decode
  //
  // NOTE: this is a helper function to facilitate transition to the new batch API - avoid using it
  //
  LLAMA_API struct llama_batch llama_batch_get_one(
  llama_token * tokens,
- int32_t n_tokens,
- llama_pos pos_0,
- llama_seq_id seq_id);
+ int32_t n_tokens);

  // Allocates a batch of tokens on the heap that can hold a maximum of n_tokens
  // Each token can be assigned up to n_seq_max sequence ids
@@ -2949,9 +2949,6 @@ struct llama_sbatch_seq {
  llama_seq_id * seq_id;
  size_t offset;
  size_t length;
-
- // helper for smoother batch API transition -- can be deprecated in the future
- llama_seq_id all_seq_id; // used if seq_id == NULL
  };

  // sequence-length-aware batch splitting
@@ -3046,9 +3043,6 @@ struct llama_sbatch {
  } else {
  ubatch.embd = nullptr;
  }
- // from here on, the else branches are deprecated;
- // they are helpers for smoother batch API transition
- if (batch->pos) {
  if (ubatch.equal_seqs) {
  for (size_t i = 0; i < length; ++i) {
  ubatch.pos[ubatch.n_tokens + i] = batch->pos[ids[seq.offset + i]];
@@ -3057,19 +3051,10 @@ struct llama_sbatch {
  // simple split
  ubatch.pos = batch->pos + seq.offset;
  }
- } else {
- for (size_t i = 0; i < length; ++i) {
- llama_pos bi = ids[seq.offset + i];
- ubatch.pos[ubatch.n_tokens + i] = batch->all_pos_0 + (bi * batch->all_pos_1);
- }
- }
  if (ubatch.equal_seqs) {
  ubatch.n_seq_id[ubatch.n_seqs] = seq.n_seq_id;
  if (seq.seq_id) {
  ubatch.seq_id[ubatch.n_seqs] = seq.seq_id;
- } else {
- GGML_ASSERT(seq.n_seq_id == 1);
- ubatch.seq_id[ubatch.n_seqs] = &seq.all_seq_id;
  }
  } else {
  // simple split
@@ -3082,10 +3067,6 @@ struct llama_sbatch {
  }
  if (batch->seq_id) {
  ubatch.seq_id = batch->seq_id + seq.offset;
- } else {
- for (size_t i = 0; i < length; ++i) {
- ubatch.seq_id[ubatch.n_seqs + i] = &seq.all_seq_id;
- }
  }
  }
  if (logits_all) {
@@ -3204,7 +3185,6 @@ struct llama_sbatch {
  s.seq_id = nullptr;
  s.offset = 0;
  s.length = n_tokens;
- s.all_seq_id = batch.all_seq_id;
  return;
  }
  std::sort(ids.begin(), ids.end(),
@@ -3227,7 +3207,7 @@ struct llama_sbatch {
  if (batch.pos) {
  return batch.pos[a] < batch.pos[b];
  }
- // no pos, sort by id (assuming batch.all_pos_1 is positive)
+ // no pos, sort by id
  return a < b;
  }
  // shared prompts go first
@@ -3237,7 +3217,6 @@ struct llama_sbatch {
  // init seq
  llama_sbatch_seq * last_seq = nullptr;

- if (batch.n_seq_id != nullptr && batch.seq_id != nullptr) {
  for (size_t i = 0; i < n_tokens; ++i) {
  const size_t bi = ids[i];
  const int32_t n_seqs = batch.n_seq_id[bi];
@@ -3254,14 +3233,10 @@ struct llama_sbatch {
  continue;
  }
  }
- llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1, batch.all_seq_id};
+ llama_sbatch_seq new_seq = {n_seqs, seq_ids, i, 1};
  seq.push_back(new_seq);
  last_seq = &seq.back();
  }
- } else {
- llama_sbatch_seq new_seq = {1, nullptr, 0, n_tokens, batch.all_seq_id};
- seq.push_back(new_seq);
- }
  // keep shared prompts first at the end, then sort by length descending.
  std::sort(seq.begin(), seq.end(),
  [](llama_sbatch_seq & a, llama_sbatch_seq & b) {
@@ -21096,9 +21071,7 @@ void llama_set_causal_attn(struct llama_context * ctx, bool causal_attn) {

  struct llama_batch llama_batch_get_one(
  llama_token * tokens,
- int32_t n_tokens,
- llama_pos pos_0,
- llama_seq_id seq_id) {
+ int32_t n_tokens) {
  return {
  /*n_tokens =*/ n_tokens,
  /*tokens =*/ tokens,
@@ -21107,9 +21080,6 @@ struct llama_batch llama_batch_get_one(
  /*n_seq_id =*/ nullptr,
  /*seq_id =*/ nullptr,
  /*logits =*/ nullptr,
- /*all_pos_0 =*/ pos_0,
- /*all_pos_1 =*/ 1,
- /*all_seq_id =*/ seq_id,
  };
  }

@@ -21122,9 +21092,6 @@ struct llama_batch llama_batch_init(int32_t n_tokens_alloc, int32_t embd, int32_
  /*n_seq_id =*/ nullptr,
  /*seq_id =*/ nullptr,
  /*logits =*/ nullptr,
- /*all_pos_0 =*/ 0,
- /*all_pos_1 =*/ 0,
- /*all_seq_id =*/ 0,
  };

  if (embd) {
@@ -21160,11 +21127,62 @@ void llama_batch_free(struct llama_batch batch) {
  if (batch.logits) free(batch.logits);
  }

+ // temporary allocate memory for the input batch if needed
+ static const llama_seq_id batch_default_seq_id = 0;
+ struct llama_batch_allocr {
+ std::array<llama_seq_id, 1> seq_id_0 = {batch_default_seq_id};
+ std::vector<llama_pos> pos;
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id *> seq_id;
+ std::vector<int8_t> logits;
+ struct llama_batch batch;
+ // optionally fulfill the batch returned by llama_batch_get_one
+ llama_batch_allocr(struct llama_context * ctx, struct llama_batch in_batch) {
+ batch = in_batch;
+ if (!batch.pos) {
+ // determine the last position in KV cache
+ llama_pos last_pos = -1;
+ for (const auto & cell : ctx->kv_self.cells) {
+ if (cell.has_seq_id(batch_default_seq_id)) {
+ last_pos = std::max(last_pos, cell.pos);
+ }
+ }
+ last_pos++; // next position
+ pos.resize(batch.n_tokens);
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
+ pos[i] = i+last_pos;
+ }
+ batch.pos = pos.data();
+ }
+ if (!batch.n_seq_id) {
+ n_seq_id.resize(batch.n_tokens);
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
+ n_seq_id[i] = seq_id_0.size();
+ }
+ batch.n_seq_id = n_seq_id.data();
+ }
+ if (!batch.seq_id) {
+ seq_id.resize(batch.n_tokens + 1);
+ seq_id[batch.n_tokens] = NULL;
+ for (int32_t i = 0; i < batch.n_tokens; i++) {
+ seq_id[i] = seq_id_0.data();
+ }
+ batch.seq_id = seq_id.data();
+ }
+ if (!batch.logits) {
+ logits.resize(batch.n_tokens);
+ logits[logits.size() - 1] = true;
+ batch.logits = logits.data();
+ }
+ }
+ };
+
  int32_t llama_encode(
  struct llama_context * ctx,
  struct llama_batch batch) {
- const int ret = llama_encode_internal(*ctx, batch);
- if (ret < 0) {
+ llama_batch_allocr batch_allocr(ctx, batch);
+ const int ret = llama_encode_internal(*ctx, batch_allocr.batch);
+ if (ret != 0) {
  LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret);
  }
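The llama_batch_allocr wrapper above is what makes the two-argument llama_batch_get_one usable: when pos is NULL it scans the KV cache cells of sequence 0 for the highest position and numbers the new tokens from there, which is the "correct token pos in llama_batch_allocr" item in the commit message, and it fills in the null-terminated seq_id list and a logits array that marks only the last token. In practice, consecutive decode calls no longer need caller-side n_past bookkeeping; a minimal sketch with ctx, prompt and new_token assumed:

    // prompt at positions 0..n-1, then one generated token at position n,
    // with no explicit position argument anywhere
    llama_decode(ctx, llama_batch_get_one(prompt.data(), prompt.size()));
    llama_decode(ctx, llama_batch_get_one(&new_token, 1)); // position picked up from the KV cache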
@@ -21174,8 +21192,9 @@ int32_t llama_encode(
  int32_t llama_decode(
  struct llama_context * ctx,
  struct llama_batch batch) {
- const int ret = llama_decode_internal(*ctx, batch);
- if (ret < 0) {
+ llama_batch_allocr batch_allocr(ctx, batch);
+ const int ret = llama_decode_internal(*ctx, batch_allocr.batch);
+ if (ret != 0) {
  LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret);
  }