llama : support optional tensors (#4283)

This commit is contained in:
Georgi Gerganov 2023-12-01 20:35:03 +02:00
parent b220222a64
commit d5a1cbde60
GPG Key ID: 449E073F9DC10735
2 changed files with 10 additions and 25 deletions

--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp

@@ -1469,7 +1469,7 @@ struct llama_server_context
     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);

         int multitask_id = id_gen++;
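
Note: `size()` returns an unsigned type here, so the old `auto` made `prompt_count` unsigned and any later signed arithmetic or comparison against it could silently wrap; pinning the type to `int` avoids that. A minimal, self-contained sketch of the pitfall (a generic illustration, not the server code):

    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> prompts;                  // empty on purpose
        auto n = prompts.size();                   // deduces size_t (unsigned)
        std::printf("n - 1 = %zu\n", n - 1);       // wraps to SIZE_MAX, not -1

        int m = static_cast<int>(prompts.size());  // explicit signed count
        std::printf("m - 1 = %d\n", m - 1);        // prints -1, as intended
        return 0;
    }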

--- a/llama.cpp
+++ b/llama.cpp

@@ -1991,10 +1991,13 @@ struct llama_model_loader {
         return tensor;
     }

-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());

         if (cur == NULL) {
+            if (optional) {
+                return NULL;
+            }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
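
With the defaulted `optional` parameter, call sites opt in per tensor, and a NULL return then means "absent" rather than an error. A caller sketch reusing the names from this diff (fragment, not a complete function):

    // required tensor: a missing entry still throws
    layer.wq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd}, backend_split);

    // optional tensor: pass true, then treat NULL as "not in this checkpoint"
    layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, true);
    if (layer.bq == NULL) {
        // no attention bias in this model; downstream code must skip it
    }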
@@ -2812,29 +2815,11 @@ static void llm_load_tensors(
             layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, backend_split);
             layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split);

-            try {
-                layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
-            } catch (const std::runtime_error& e) {
-                if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
-            }
-
-            try {
-                layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
-            } catch (const std::runtime_error& e) {
-                if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
-            }
-
-            try {
-                layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
-            } catch (const std::runtime_error& e) {
-                if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
-            }
-
-            try {
-                layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
-            } catch (const std::runtime_error& e) {
-                if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
-            }
+            // optional bias tensors
+            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
+            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
+            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
+            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);

             layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
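
The graph-building code that consumes these tensors then guards each bias add with a NULL check. That code is outside this diff; the following is a sketch in the style of llama.cpp's build functions (`ctx0` and `cur` are assumed from the surrounding build context):

    // attention projections, with bias applied only when the tensor exists
    struct ggml_tensor * Qcur = ggml_mul_mat(ctx0, layer.wq, cur);
    if (layer.bq) {
        Qcur = ggml_add(ctx0, Qcur, layer.bq);
    }

    struct ggml_tensor * Kcur = ggml_mul_mat(ctx0, layer.wk, cur);
    if (layer.bk) {
        Kcur = ggml_add(ctx0, Kcur, layer.bk);
    }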