llama : rename n_ctx -> cache.size, less confusing (#0)

Georgi Gerganov 2024-05-23 12:38:18 +03:00
parent 152da28ae5
commit a61a94e543


@@ -2475,7 +2475,6 @@ static bool llama_kv_cache_init(
 static bool llama_kv_cache_find_slot(
            struct llama_kv_cache & cache,
         const struct llama_batch & batch) {
-    const uint32_t n_ctx    = cache.size;
     const uint32_t n_tokens = batch.n_tokens;
 
     if (cache.recurrent) {
@@ -2526,16 +2525,16 @@ static bool llama_kv_cache_find_slot(
     }
     // otherwise, one cell per token.
 
-    if (n_tokens > n_ctx) {
-        LLAMA_LOG_ERROR("%s: n_tokens=%d > n_ctx=%d\n", __func__, n_tokens, n_ctx);
+    if (n_tokens > cache.size) {
+        LLAMA_LOG_ERROR("%s: n_tokens=%d > cache.size=%d\n", __func__, n_tokens, cache.size);
         return false;
     }
 
     uint32_t n_tested = 0;
 
     while (true) {
-        if (cache.head + n_tokens > n_ctx) {
-            n_tested += n_ctx - cache.head;
+        if (cache.head + n_tokens > cache.size) {
+            n_tested += cache.size - cache.head;
             cache.head = 0;
             continue;
         }
@@ -2554,7 +2553,7 @@ static bool llama_kv_cache_find_slot(
             break;
         }
 
-        if (n_tested >= n_ctx) {
+        if (n_tested >= cache.size) {
             //LLAMA_LOG_ERROR("%s: failed to find a slot for %d tokens\n", __func__, n_tokens);
             return false;
         }
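For context, the loop touched by this rename searches the KV cache for a contiguous run of n_tokens free cells, wrapping cache.head back to 0 whenever the candidate slot would run past the end of the buffer, and giving up once n_tested reaches cache.size (formerly the local n_ctx copy). Below is a minimal, standalone sketch of that search under simplified assumptions: KvCacheSketch, CellSketch and find_slot_sketch are illustrative stand-ins, not the real llama.cpp types, and only the portions of llama_kv_cache_find_slot visible in this diff are mirrored.

// Standalone sketch of the contiguous-slot search (simplified stand-in types,
// not the real llama_kv_cache structures).
#include <cstdint>
#include <cstdio>
#include <vector>

struct CellSketch {
    int32_t pos = -1;                        // -1 means the cell is free
    bool is_empty() const { return pos == -1; }
};

struct KvCacheSketch {
    uint32_t size = 0;                       // total number of cells (what the old code aliased as n_ctx)
    uint32_t head = 0;                       // where the search for a free slot starts
    std::vector<CellSketch> cells;
};

// Find n_tokens contiguous free cells, wrapping the head back to 0 when the
// candidate slot would run past the end of the buffer.
static bool find_slot_sketch(KvCacheSketch & cache, uint32_t n_tokens) {
    if (n_tokens > cache.size) {
        fprintf(stderr, "n_tokens=%u > cache.size=%u\n", n_tokens, cache.size);
        return false;
    }

    uint32_t n_tested = 0;

    while (true) {
        if (cache.head + n_tokens > cache.size) {
            // candidate slot would overflow the buffer: wrap around
            n_tested += cache.size - cache.head;
            cache.head = 0;
            continue;
        }

        // check whether all cells in [head, head + n_tokens) are free
        bool found = true;
        for (uint32_t i = 0; i < n_tokens; i++) {
            if (!cache.cells[cache.head + i].is_empty()) {
                // jump past the occupied cell and keep searching
                found = false;
                cache.head += i + 1;
                n_tested   += i + 1;
                break;
            }
        }

        if (found) {
            break;
        }

        if (n_tested >= cache.size) {
            // walked the whole buffer without finding a large enough gap
            return false;
        }
    }

    // mark the slot as occupied (positions are arbitrary in this sketch)
    for (uint32_t i = 0; i < n_tokens; i++) {
        cache.cells[cache.head + i].pos = (int32_t) i;
    }
    return true;
}

int main() {
    KvCacheSketch cache;
    cache.size = 8;
    cache.cells.resize(cache.size);

    printf("slot found: %s\n", find_slot_sketch(cache, 4) ? "yes" : "no");
}

The sketch also shows why the rename helps: every bound in the search is a property of the cache itself, so reading cache.size at each use is clearer than carrying a separate n_ctx copy that merely shadows it.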