Mirror of https://github.com/ggerganov/llama.cpp.git, synced 2024-12-26 03:14:35 +00:00.
llama : make load error reporting more granular (#5477)
llama : make load error reporting more granular (#5477)
Makes it easier to pinpoint where e.g. `unordered_map::at: key not found` comes from.
This commit is contained in:
parent
263978904c
commit
037259be68
llama.cpp — 18 changed lines (15 additions, 3 deletions)
@@ -4384,9 +4384,21 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
     model.hparams.vocab_only = params.vocab_only;

-    llm_load_arch   (ml, model);
-    llm_load_hparams(ml, model);
-    llm_load_vocab  (ml, model);
+    try {
+        llm_load_arch(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+    }
+    try {
+        llm_load_hparams(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+    }
+    try {
+        llm_load_vocab(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+    }

     llm_load_print_meta(ml, model);

Loading…
Reference in New Issue
Block a user