1
0
Эх сурвалжийг харах

llama : remove unused vars (#4796)

Georgi Gerganov 2 жил өмнө
parent
commit
9dede37d81
1 өөрчлөгдсөн 0 нэмэгдсэн , 2 устгасан
  1. 0 2
      llama.cpp

+ 0 - 2
llama.cpp

@@ -4997,7 +4997,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
 
         const int64_t n_rot = n_embd_head_k / 2;
@@ -5210,7 +5209,6 @@ struct llm_build_context {
         struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
 
         const int64_t n_embd_head = hparams.n_embd_head_v;
-        const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();
         GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
 
         struct ggml_tensor * cur;