mirror of https://github.com/ggerganov/llama.cpp.git
convert.py : fix llama/llama2 conversion due to vocab_size=-1 (#5019)
PR #4818 (merged last week) reintroduced a config check for vocab_size that had already been addressed in PR #4258 (merged 2023-11-30). Without this fix, llama/llama2 models cannot be converted; conversion fails with: `ValueError: The model's vocab size is set to -1 in params.json. Please update it manually. Maybe 32000?`
parent 3e945cc1e9
commit b46757735d
@@ -348,7 +348,7 @@ class Params:
         f_rope_freq_base = 1e6

         return Params(
-            n_vocab=config.get("vocab_size", model["tok_embeddings.weight"].shape[0]),
+            n_vocab=model["tok_embeddings.weight"].shape[0],
             n_embd=config["dim"],
             n_layer=config["n_layers"],
             n_ctx=n_ctx,
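
For context on why the removed line misbehaved: `dict.get` only substitutes its default when the key is absent, and llama2's `params.json` ships with `"vocab_size": -1`, so the sentinel value was returned verbatim and later tripped the ValueError quoted above. A minimal sketch of the behavior (the `config` values here are illustrative, not taken from a real checkpoint):

    # config as loaded from a llama2 params.json (illustrative values)
    config = {"dim": 4096, "n_layers": 32, "vocab_size": -1}
    # fallback that would come from model["tok_embeddings.weight"].shape[0]
    fallback = 32000

    # dict.get falls back only when the key is MISSING, not when it holds -1:
    print(config.get("vocab_size", fallback))  # -> -1, triggers the ValueError

    # the fix reads the vocab size from the embedding tensor shape instead:
    n_vocab = fallback
    print(n_vocab)  # -> 32000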