llama : make load error reporting more granular (#5477)

Makes it easier to pinpoint where e.g. `unordered_map::at: key not found` comes from.
Aarni Koskela 2024-02-13 15:24:50 +02:00 committed by GitHub
parent 263978904c
commit 037259be68


@@ -4384,9 +4384,21 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
         model.hparams.vocab_only = params.vocab_only;
 
-        llm_load_arch   (ml, model);
-        llm_load_hparams(ml, model);
-        llm_load_vocab  (ml, model);
+        try {
+            llm_load_arch(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+        }
+        try {
+            llm_load_hparams(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+        }
+        try {
+            llm_load_vocab(ml, model);
+        } catch(const std::exception & e) {
+            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+        }
 
         llm_load_print_meta(ml, model);
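
For context, a minimal standalone sketch (not part of the commit) of the wrap-and-rethrow pattern the diff applies: a generic exception from a failed lookup is caught and rethrown with a stage-specific prefix, so the surfaced message identifies which loading stage produced it. The `load_vocab` helper and the metadata key below are hypothetical.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>

// Hypothetical loading stage: throws std::out_of_range ("...key not found")
// when the metadata key is missing, like an unguarded lookup inside a loader.
static void load_vocab(const std::unordered_map<std::string, int> & metadata) {
    metadata.at("tokenizer.model"); // throws if the key is absent
}

int main() {
    std::unordered_map<std::string, int> metadata; // deliberately missing the key
    try {
        try {
            load_vocab(metadata);
        } catch (const std::exception & e) {
            // Rethrow with added context, mirroring the pattern in the diff above.
            throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
        }
    } catch (const std::exception & e) {
        // Prints e.g. "error loading model vocabulary: unordered_map::at: key not found"
        // rather than only the bare library message.
        std::cerr << e.what() << std::endl;
    }
    return 0;
}
```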