llama : remove unused variable

Francis Couture-Harpin 2024-08-21 23:06:22 -04:00
parent aff96920f9
commit e04910dc48


@@ -7718,7 +7718,6 @@ static bool llm_load_tensors(
 const int64_t d_inner = hparams.ssm_d_inner;
 const int64_t d_state = hparams.ssm_d_state;
 const int64_t n_head = hparams.ssm_dt_rank;
-const int64_t head_dim = n_embd / n_head;
 const int64_t n_group = hparams.ssm_n_group;
 const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
@@ -9287,7 +9286,7 @@ static struct ggml_tensor * llm_build_mamba2(
 const int64_t d_inner = hparams.ssm_d_inner;
 const int64_t d_state = hparams.ssm_d_state;
 const int64_t n_head = hparams.ssm_dt_rank;
-const int64_t head_dim = d_inner / n_head; // FIXME
+const int64_t head_dim = d_inner / n_head;
 const int64_t n_group = hparams.ssm_n_group;
 const int64_t n_seqs = batch.n_seqs;