Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-26 19:34:35 +00:00
fix auto merge

parent a1666aaaca
commit 30faf1f3de
@@ -10863,7 +10863,7 @@ struct llm_build_context {
                 // special-case: the up and gate tensors are merged into a single tensor
                 // TOOD: support into llm_build_ffn
                 {
-                    cur = llm_build_ffn(ctx0, cur,
+                    cur = llm_build_ffn(lctx, ctx0, cur,
                             model.layers[il].ffn_up,   NULL, NULL,
                             NULL,                      NULL, NULL,
                             model.layers[il].ffn_down, NULL, NULL,
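For readers of the hunk above: the "merged" special case means some checkpoints store the up and gate projections fused into a single tensor, which is why the call passes only ffn_up and leaves the gate slots NULL. Below is a minimal, self-contained C++ sketch of what consuming such a fused output can look like; the [up | gate] layout and the SiLU activation are assumptions for illustration, not details taken from this diff.

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    // Assume the fused projection produced 2*n_ff values for one token:
    // first n_ff are the "up" half, last n_ff the "gate" half (layout assumed).
    const int n_ff = 4;
    std::vector<float> fused = {0.5f, -1.0f,  2.0f, 0.0f,   // up half
                                1.0f,  0.5f, -2.0f, 3.0f};  // gate half
    std::vector<float> out(n_ff);
    for (int i = 0; i < n_ff; ++i) {
        const float up   = fused[i];
        const float gate = fused[n_ff + i];
        const float silu = gate / (1.0f + std::exp(-gate)); // SiLU activation
        out[i] = silu * up; // gated hidden state, fed to the down projection next
    }
    for (float v : out) std::printf("%g ", v);
    std::printf("\n");
    return 0;
}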
@@ -13622,7 +13622,7 @@ struct llm_build_context {
                 );
                 cb(Kcur, "Kcur_rope", il);

-                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
+                cur = llm_build_kv(lctx, ctx0, model, hparams, cparams, kv_self, gf,
                         model.layers[il].wo, NULL,
                         Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);

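For context on the last scalar argument above: 1.0f/sqrtf(float(n_embd_head)) is the standard scaled dot-product attention factor, which keeps pre-softmax scores at roughly unit variance regardless of head size. A tiny self-contained check (the head dimension of 128 is an assumed example value, not read from this model):

#include <cmath>
#include <cstdio>

int main() {
    const int n_embd_head = 128; // assumed example head dimension
    const float scale = 1.0f / std::sqrt((float) n_embd_head);
    // A raw dot product of two unit-variance vectors of length 128 has
    // standard deviation ~sqrt(128) ~= 11.3; scaling restores it to ~1.
    std::printf("scale = %f (raw score sigma ~ %f)\n",
                scale, std::sqrt((float) n_embd_head));
    return 0;
}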
@@ -13647,7 +13647,7 @@ struct llm_build_context {
                 LLM_NORM_RMS, cb, il);
                 cb(cur, "ffn_norm", il);

-                cur = llm_build_ffn(ctx0, cur,
+                cur = llm_build_ffn(lctx, ctx0, cur,
                         model.layers[il].ffn_up,   NULL, NULL,
                         NULL,                      NULL, NULL,
                         model.layers[il].ffn_down, NULL, NULL,
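Taken together, the three hunks repair call sites that the automatic merge left on the old helper signatures: llm_build_ffn and llm_build_kv now take the llama_context lctx ahead of the ggml context ctx0, so each caller gains one leading argument. The following is a minimal, self-contained sketch of this "thread the runtime context through the graph builders" pattern; the names (build_ffn, RunCtx, GraphCtx) are invented for illustration and are not llama.cpp's actual API.

#include <cstdio>

struct GraphCtx { int n_nodes = 0; };     // stand-in for ggml_context * ctx0
struct RunCtx   { bool offload = true; }; // stand-in for llama_context & lctx

// New-style helper: the runtime context comes first, so the builder can
// consult runtime state (backends, cparams, ...) while adding graph nodes.
static int build_ffn(RunCtx & rctx, GraphCtx & gctx, int cur) {
    if (rctx.offload) {
        // e.g. decide where the newly added nodes should run
    }
    gctx.n_nodes += 3; // up projection, activation, down projection
    return cur + 1;    // pretend this is the id of the new "cur" tensor
}

int main() {
    RunCtx   rctx;
    GraphCtx gctx;
    int cur = 0;
    // A call site left behind by the merge would still read
    // build_ffn(gctx, cur) and no longer compile; the fix, as in the
    // hunks above, is to pass the runtime context first:
    cur = build_ffn(rctx, gctx, cur);
    std::printf("nodes=%d cur=%d\n", gctx.n_nodes, cur);
    return 0;
}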