Просмотр исходного кода

llama : support optional tensors (#4283)

Georgi Gerganov 2 года назад
Родитель
Commit
d5a1cbde60
2 изменённых файла: 10 добавлений и 25 удалений
  1. 1 1
      examples/server/server.cpp
  2. 9 24
      llama.cpp

+ 1 - 1
examples/server/server.cpp

@@ -1469,7 +1469,7 @@ struct llama_server_context
 
 
     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);
 
         int multitask_id = id_gen++;

+ 9 - 24
llama.cpp

@@ -1991,10 +1991,13 @@ struct llama_model_loader {
         return tensor;
     }
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
 
         if (cur == NULL) {
+            if (optional) {
+                return NULL;
+            }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
 
@@ -2812,29 +2815,11 @@ static void llm_load_tensors(
                         layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, backend_split);
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split);
 
-                        try {
-                            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
-                        }
+                        // optional bias tensors
+                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
+                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
 
                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);