mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-11-15 15:29:53 +00:00
llama : remove unused variable

parent aff96920f9
commit e04910dc48
@@ -7718,7 +7718,6 @@ static bool llm_load_tensors(
     const int64_t d_inner = hparams.ssm_d_inner;
     const int64_t d_state = hparams.ssm_d_state;
     const int64_t n_head = hparams.ssm_dt_rank;
-    const int64_t head_dim = n_embd / n_head;
     const int64_t n_group = hparams.ssm_n_group;
     const int64_t d_in_proj = 2*d_inner + 2*n_group*d_state + n_head;
@@ -9287,7 +9286,7 @@ static struct ggml_tensor * llm_build_mamba2(
     const int64_t d_inner = hparams.ssm_d_inner;
     const int64_t d_state = hparams.ssm_d_state;
     const int64_t n_head = hparams.ssm_dt_rank;
-    const int64_t head_dim = d_inner / n_head; // FIXME
+    const int64_t head_dim = d_inner / n_head;
     const int64_t n_group = hparams.ssm_n_group;
     const int64_t n_seqs = batch.n_seqs;
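For context, the d_in_proj width in the first hunk, 2*d_inner + 2*n_group*d_state + n_head, matches the usual Mamba-2 convention of packing the z, x, B, C and dt projections into a single in_proj matrix. Below is a minimal sketch of that decomposition; the helper name and comments are illustrative assumptions, not code from this commit.

#include <cstdint>

// Sketch: how d_in_proj = 2*d_inner + 2*n_group*d_state + n_head
// decomposes under the standard Mamba-2 in_proj packing (assumed
// layout; not taken from this commit).
static int64_t mamba2_d_in_proj(int64_t d_inner, int64_t d_state,
                                int64_t n_head, int64_t n_group) {
    const int64_t d_z  = d_inner;            // gating branch
    const int64_t d_x  = d_inner;            // SSM input branch
    const int64_t d_B  = n_group * d_state;  // input-dependent B
    const int64_t d_C  = n_group * d_state;  // input-dependent C
    const int64_t d_dt = n_head;             // per-head time step
    return d_z + d_x + d_B + d_C + d_dt;     // == 2*d_inner + 2*n_group*d_state + n_head
}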