Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-25 02:44:36 +00:00
llama : fix typo in llama_tensor_get_type comment [no ci] (#8937)
This commit is contained in:
parent daef3ab233
commit 6f6496bb09
@@ -15304,7 +15304,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
     const int n_expert = std::max(1, (int)qs.model.hparams.n_expert);
     auto layer_info = [n_expert] (int i_layer, int n_layer, const char * name) {
         if (n_expert > 1) {
-            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but iccasionally randomly
+            // Believe it or not, "experts" in the FFN of Mixtral-8x7B are not consecutive, but occasionally randomly
             // sprinkled in the model. Hence, simply dividing i_ffn_down by n_expert does not work
             // for getting the current layer as I initially thought, and we need to resort to parsing the
             // tensor name.
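For context, the comment being fixed documents why the quantizer parses the tensor name instead of dividing a running tensor counter by n_expert: Mixtral-8x7B expert tensors are not laid out consecutively. Below is a minimal, self-contained sketch of that name-parsing idea, written as a hypothetical helper; it assumes the "blk.<layer>." naming convention llama.cpp uses for GGUF tensors and is not the repository's exact code.

// Sketch only: recover the layer index from a tensor name like
// "blk.17.ffn_down.3.weight", assuming the "blk.<layer>." convention.
#include <cstdio>
#include <stdexcept>
#include <string>

static int layer_from_tensor_name(const char * name, int n_layer) {
    int i_layer = -1;
    // %d stops at the first non-digit, so "blk.17.ffn_down..." yields 17
    if (sscanf(name, "blk.%d.", &i_layer) != 1) {
        throw std::runtime_error(std::string("failed to determine layer for tensor ") + name);
    }
    // guard against malformed names mapping to an out-of-bounds layer
    if (i_layer < 0 || i_layer >= n_layer) {
        throw std::runtime_error(std::string("bad layer index for tensor ") + name);
    }
    return i_layer;
}

int main() {
    printf("%d\n", layer_from_tensor_name("blk.17.ffn_down.3.weight", 32)); // prints 17
}

Using sscanf keeps the parse cheap and dependency-free, which fits a routine that runs once per tensor during quantization.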