Parcourir la source

llama : enable LLAMA_SET_ROWS=1 by default (#14959)

ggml-ci
Georgi Gerganov il y a 5 mois
Parent
commit
a4569c41fd

+ 1 - 1
src/llama-context.cpp

@@ -105,7 +105,7 @@ llama_context::llama_context(
 
 
     {
         const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
-        supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : false;
+        supports_set_rows = LLAMA_SET_ROWS ? (atoi(LLAMA_SET_ROWS) != 0) : supports_set_rows;
 
         if (!supports_set_rows && !cparams.kv_unified) {
             LLAMA_LOG_WARN("%s: non-unified KV cache requires ggml_set_rows() - forcing unified KV cache\n", __func__);

+ 1 - 1
src/llama-context.h

@@ -289,7 +289,7 @@ private:
 
 
     // env: LLAMA_SET_ROWS (temporary)
     // ref: https://github.com/ggml-org/llama.cpp/pull/14285
-    bool supports_set_rows = false;
+    bool supports_set_rows = true;
 
     // env: LLAMA_GRAPH_REUSE_DISABLE
     bool graph_reuse_disable = false;

+ 1 - 1
src/llama-kv-cache-unified.cpp

@@ -193,7 +193,7 @@ llama_kv_cache_unified::llama_kv_cache_unified(
     debug = LLAMA_KV_CACHE_DEBUG ? atoi(LLAMA_KV_CACHE_DEBUG) : 0;
 
     const char * LLAMA_SET_ROWS = getenv("LLAMA_SET_ROWS");
-    supports_set_rows = LLAMA_SET_ROWS ? atoi(LLAMA_SET_ROWS) != 0 : 0;
+    supports_set_rows = LLAMA_SET_ROWS ? atoi(LLAMA_SET_ROWS) != 0 : supports_set_rows;
 
     if (!supports_set_rows) {
         // ref: https://github.com/ggml-org/llama.cpp/pull/14363

+ 1 - 1
src/llama-kv-cache-unified.h

@@ -230,7 +230,7 @@ private:
 
 
     // env: LLAMA_SET_ROWS (temporary)
     // ref: https://github.com/ggml-org/llama.cpp/pull/14285
-    bool supports_set_rows = false;
+    bool supports_set_rows = true;
 
     const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;