mirror of https://github.com/ggerganov/llama.cpp.git
convert.py : fix llama/llama2 conversion due to vocab_size=-1 (#4258)
parent 954e22858c
commit f4d973cecb
@@ -267,7 +267,7 @@ class Params:
         n_ctx = 2048

         return Params(
-            n_vocab = config.get("vocab_size", model["tok_embeddings.weight"].shape[0]),
+            n_vocab = model["tok_embeddings.weight"].shape[0],
             n_embd  = config["dim"],
             n_layer = config["n_layers"],
             n_ctx   = n_ctx,
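The root cause: llama/llama2 params.json files ship with "vocab_size": -1, and dict.get(key, default) only falls back to the default when the key is absent, so the bogus -1 was passed through as n_vocab. Reading the vocabulary size off the first dimension of the token-embedding tensor sidesteps the bad value. Below is a minimal sketch of the failure mode; the config values and the embedding shape are illustrative assumptions, not taken from a real checkpoint:

# Minimal sketch of the bug (illustrative values, not from a real checkpoint):
# dict.get() only falls back to its default when the key is absent, so a
# stored -1 is returned as-is instead of the embedding-derived size.

class FakeTensor:
    # Stand-in for the real tok_embeddings.weight tensor: (n_vocab, n_embd).
    shape = (32000, 4096)

config = {"vocab_size": -1, "dim": 4096, "n_layers": 32}  # llama2-style params.json
model = {"tok_embeddings.weight": FakeTensor()}

# Old code: the key exists, so .get() returns -1 and n_vocab goes negative.
old_n_vocab = config.get("vocab_size", model["tok_embeddings.weight"].shape[0])
assert old_n_vocab == -1

# Fixed code: read the vocabulary size directly off the embedding tensor.
new_n_vocab = model["tok_embeddings.weight"].shape[0]
assert new_n_vocab == 32000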