
add geglu activation function (#14074)

Co-authored-by: dinhhuy <huy.dinh@brains-tech.co.jp>
Đinh Trọng Huy, 7 months ago
Parent
Commit
91a8ee6a6f
2 changed files with 23 additions and 0 deletions
  1. src/llama-graph.cpp  +22 −0
  2. src/llama-graph.h    +1 −0

+ 22 - 0
src/llama-graph.cpp

@@ -659,6 +659,28 @@ ggml_tensor * llm_graph_context::build_ffn(
                 cur = ggml_mul(ctx0, x0, x1);
                 cb(cur, "ffn_mul", il);
             } break;
+        case LLM_FFN_GEGLU:
+            {
+                // Split into two equal parts
+                int64_t split_point = cur->ne[0] / 2;
+                ggml_tensor * output_ffn_up = ggml_cont(ctx0, ggml_view_2d(
+                                                ctx0, cur, split_point,
+                                                cur->ne[1], cur->nb[1], 0
+                                            ));
+                ggml_tensor * output_ffn_gate = ggml_cont(ctx0, ggml_view_2d(
+                                                ctx0, cur, split_point,
+                                                cur->ne[1], cur->nb[1],
+                                                split_point * ggml_element_size(cur)
+                                            ));
+
+                // Apply GELU activation function to the first part
+                output_ffn_up = ggml_gelu(ctx0, output_ffn_up);
+                cb(output_ffn_up, "ffn_gelu", il);
+
+                // Element-wise multiplication between the activated part and the gate part
+                cur = ggml_mul(ctx0, output_ffn_up, output_ffn_gate);
+                cb(cur, "ffn_geglu", il);
+            } break;
     }
 
     if (gate && type_gate == LLM_FFN_PAR) {

+ 1 - 0
src/llama-graph.h

@@ -36,6 +36,7 @@ enum llm_ffn_op_type {
     LLM_FFN_RELU,
     LLM_FFN_RELU_SQR,
     LLM_FFN_SWIGLU,
+    LLM_FFN_GEGLU,
 };
 
 enum llm_ffn_gate_type {
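
Below is a standalone sketch (not part of the commit) of what the new LLM_FFN_GEGLU branch computes, written on plain float rows instead of ggml tensors: the FFN up-projection output is split along its first dimension into an activation half and a gate half, GELU is applied to the first half, and the result is multiplied element-wise by the gate half. The erf-based GELU and the geglu_rows helper are assumptions for illustration only; ggml's ggml_gelu may use a tanh approximation, and this buffer layout is hypothetical, not llama.cpp code.

    // sketch of the GEGLU semantics used by the LLM_FFN_GEGLU branch above
    #include <cmath>
    #include <cstdio>
    #include <vector>

    // exact GELU: 0.5 * x * (1 + erf(x / sqrt(2))); ggml may use a tanh approximation
    static float gelu(float x) {
        return 0.5f * x * (1.0f + std::erf(x / std::sqrt(2.0f)));
    }

    // in:  n_rows rows of 2*n_half values each (activation half followed by gate half)
    // out: n_rows rows of n_half values, out = gelu(up) * gate
    static std::vector<float> geglu_rows(const std::vector<float> & in, int n_rows, int n_half) {
        std::vector<float> out(n_rows * n_half);
        for (int r = 0; r < n_rows; ++r) {
            const float * up   = in.data() + r * 2 * n_half;           // first half of the row
            const float * gate = in.data() + r * 2 * n_half + n_half;  // second half of the row
            for (int i = 0; i < n_half; ++i) {
                out[r * n_half + i] = gelu(up[i]) * gate[i];
            }
        }
        return out;
    }

    int main() {
        // one row with n_half = 2: up = {1.0, -1.0}, gate = {2.0, 3.0}
        std::vector<float> x = { 1.0f, -1.0f, 2.0f, 3.0f };
        std::vector<float> y = geglu_rows(x, /*n_rows=*/1, /*n_half=*/2);
        printf("%f %f\n", y[0], y[1]);  // gelu(1)*2 ≈ 1.683, gelu(-1)*3 ≈ -0.476
        return 0;
    }

This mirrors the graph code: the two ggml_view_2d calls take the first and second halves of ne[0] (split_point elements each, the second offset by split_point * element_size bytes), ggml_gelu activates the first half, and ggml_mul combines it with the gate half.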