From 9e897d44390f3196c6680e1847c3693354de00ed Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Fri, 4 Oct 2024 12:04:54 +0300
Subject: [PATCH] common : sanity check for non-NULL tokens

ggml-ci
---
 common/common.cpp | 30 +++++++++++++++++++++++++++++-
 src/llama-vocab.h | 18 +++++++++---------
 src/llama.cpp     |  2 +-
 3 files changed, 39 insertions(+), 11 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index a0611f3d1..29df16c95 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -838,6 +838,31 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         return iparams;
     }
 
+    if (params.reranking) {
+        bool ok = true;
+
+        if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a BOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have an EOS token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
+            LOG_WRN("%s: warning: model does not have a SEP token, reranking will not work\n", __func__);
+            ok = false;
+        }
+
+        if (!ok) {
+            llama_free_model(model);
+
+            return iparams;
+        }
+    }
+
     auto cparams = llama_context_params_from_gpt_params(params);
 
     llama_context * lctx = llama_new_context_with_model(model, cparams);
@@ -855,6 +880,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (cvec.n_embd == -1) {
             llama_free(lctx);
             llama_free_model(model);
+
             return iparams;
         }
 
@@ -867,6 +893,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (err) {
             llama_free(lctx);
             llama_free_model(model);
+
             return iparams;
         }
     }
@@ -889,7 +916,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         llama_lora_adapters_apply(lctx, iparams.lora_adapters);
     }
 
-    if (params.sparams.ignore_eos && llama_token_eos(model) == -1) {
+    if (params.sparams.ignore_eos && llama_token_eos(model) == LLAMA_TOKEN_NULL) {
         LOG_WRN("%s: warning: model does not have an EOS token, ignoring --ignore-eos\n", __func__);
         params.sparams.ignore_eos = false;
     }
@@ -930,6 +957,7 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
 
     iparams.model   = model;
     iparams.context = lctx;
+
     return iparams;
 }
 
diff --git a/src/llama-vocab.h b/src/llama-vocab.h
index 069bdc423..28bad9135 100644
--- a/src/llama-vocab.h
+++ b/src/llama-vocab.h
@@ -40,17 +40,17 @@ struct llama_vocab {
     id special_bos_id  = 1;
     id special_eos_id  = 2;
     id special_unk_id  = 0;
-    id special_sep_id  = -1;
-    id special_pad_id  = -1;
-    id special_cls_id  = -1;
-    id special_mask_id = -1;
+    id special_sep_id  = LLAMA_TOKEN_NULL;
+    id special_pad_id  = LLAMA_TOKEN_NULL;
+    id special_cls_id  = LLAMA_TOKEN_NULL;
+    id special_mask_id = LLAMA_TOKEN_NULL;
 
     id linefeed_id = 13;
-    id special_prefix_id = -1;
-    id special_suffix_id = -1;
-    id special_middle_id = -1;
-    id special_eot_id    = -1; // TODO: move above after "eos_id", and here add "file separator" token
-    id special_eom_id    = -1;
+    id special_prefix_id = LLAMA_TOKEN_NULL;
+    id special_suffix_id = LLAMA_TOKEN_NULL;
+    id special_middle_id = LLAMA_TOKEN_NULL;
+    id special_eot_id    = LLAMA_TOKEN_NULL; // TODO: move above after "eos_id", and here add "file separator" token
+    id special_eom_id    = LLAMA_TOKEN_NULL;
 
     // set of all tokens that cause "end of generation"
     std::set<llama_token> special_eog_ids;
diff --git a/src/llama.cpp b/src/llama.cpp
index 3443b0689..bf6fd9277 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -2412,7 +2412,7 @@ struct llama_hparams {
 
     // needed by encoder-decoder models (e.g. T5, FLAN-T5)
    // ref: https://github.com/ggerganov/llama.cpp/pull/8141
-    llama_token dec_start_token_id = -1;
+    llama_token dec_start_token_id = LLAMA_TOKEN_NULL;
 
     enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
     enum llama_rope_type rope_type = LLAMA_ROPE_TYPE_NONE;
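-- 

Note: callers can run the same kind of sanity check themselves before relying
on these tokens. The sketch below is illustrative only, not part of the patch;
it assumes the public llama.h API at this revision (llama_load_model_from_file,
llama_token_bos/eos/sep, LLAMA_TOKEN_NULL) and uses a placeholder model path:

    #include "llama.h"

    #include <cstdio>

    int main() {
        llama_model_params mparams = llama_model_default_params();

        // "model.gguf" is a placeholder - substitute a real GGUF file
        llama_model * model = llama_load_model_from_file("model.gguf", mparams);
        if (model == NULL) {
            fprintf(stderr, "failed to load model\n");
            return 1;
        }

        // mirror the patch: reranking needs BOS, EOS and SEP tokens, and any
        // of them may be LLAMA_TOKEN_NULL (-1) for a given vocab
        bool ok = true;

        if (llama_token_bos(model) == LLAMA_TOKEN_NULL) {
            fprintf(stderr, "model has no BOS token\n");
            ok = false;
        }
        if (llama_token_eos(model) == LLAMA_TOKEN_NULL) {
            fprintf(stderr, "model has no EOS token\n");
            ok = false;
        }
        if (llama_token_sep(model) == LLAMA_TOKEN_NULL) {
            fprintf(stderr, "model has no SEP token\n");
            ok = false;
        }

        if (!ok) {
            llama_free_model(model);
            return 1;
        }

        // ... proceed to llama_new_context_with_model(model, ...) as usual

        llama_free_model(model);
        return 0;
    }

Running the check before context creation matches the patch's placement in
llama_init_from_gpt_params: when a required token is missing, the model is
freed and no context is ever allocated.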