Mirror of https://github.com/ggerganov/llama.cpp.git (synced 2024-11-14 06:49:54 +00:00)
ggml : add comment about backward GGML_OP_DIAG_MASK_INF (#4203)
commit 48b24b170e
parent 28cb35a0ec
Showing 1 changed file (ggml.c) with 2 additions and 0 deletions
@@ -15335,6 +15335,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     const int n_past = ((int32_t *) tensor->op_params)[0];
                     src0->grad =
                         ggml_add_or_set(ctx, src0->grad,
+                            /* ggml_diag_mask_inf_impl() shouldn't be here */
+                            /* ref: https://github.com/ggerganov/llama.cpp/pull/4203#discussion_r1412377992 */
                             ggml_diag_mask_zero_impl(ctx, tensor->grad, n_past, false),
                             zero_table);
                 }
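For context, the backward of GGML_OP_DIAG_MASK_INF propagates the upstream gradient only through the unmasked positions: the entries set to -inf in the forward pass are constants with respect to src0, so their gradient is zero. That is why the backward zero-masks tensor->grad via ggml_diag_mask_zero_impl() instead of re-applying the -inf mask, which is what the added comment points out. Below is a minimal standalone C sketch of that idea; it is not the ggml API, and the function names, the fixed 4x4 size, n_past = 0, and the all-ones upstream gradient are illustrative assumptions.

/* Standalone sketch (assumed names, not ggml):
 *   Forward:  y[i][j] = x[i][j]    if j <= i + n_past
 *             y[i][j] = -INFINITY  otherwise (constant w.r.t. x)
 *   Backward: dx[i][j] = dy[i][j]  if j <= i + n_past
 *             dx[i][j] = 0         otherwise
 */
#include <math.h>
#include <stdio.h>

#define N 4

static void diag_mask_inf_forward(float y[N][N], const float x[N][N], int n_past) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            y[i][j] = (j > i + n_past) ? -INFINITY : x[i][j];
}

/* Gradient w.r.t. x: copy the upstream gradient and zero the masked
 * positions. Re-applying -inf here would poison the gradients. */
static void diag_mask_inf_backward(float dx[N][N], const float dy[N][N], int n_past) {
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            dx[i][j] = (j > i + n_past) ? 0.0f : dy[i][j];
}

int main(void) {
    float x[N][N], y[N][N], dy[N][N], dx[N][N];
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            x[i][j]  = (float)(i * N + j);
            dy[i][j] = 1.0f; /* pretend upstream gradient */
        }

    diag_mask_inf_forward(y, x, /*n_past=*/0);
    diag_mask_inf_backward(dx, dy, /*n_past=*/0);

    /* Prints a lower-triangular pattern of ones: only unmasked
     * positions receive gradient. */
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            printf("%4.1f ", dx[i][j]);
        printf("\n");
    }
    return 0;
}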