
llama : use the same threshold for OpenBLAS and ggml thread limiting (#577)

Maël Kerbiriou, 2 years ago
commit
41318d708e
1 changed file with 1 addition and 1 deletion

llama.cpp  (+1, -1)

@@ -856,7 +856,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N > 255 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;

     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
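
The new cutoff matches the point at which ggml itself starts handing large matrix multiplications to BLAS (a 32-element minimum per dimension, assumed here from the commit title), so single-thread mode now engages exactly when BLAS takes over the heavy work, instead of only for prompts longer than 255 tokens. A minimal standalone sketch of the resulting thread-count rule (the helper name and signature are illustrative, not part of llama.cpp):

    #include <stdbool.h>

    // Sketch only: reproduces the decision made in llama_eval_internal above.
    // N is the number of tokens in the current eval batch.
    static int choose_n_threads(int N, int n_threads, bool has_blas) {
        // With BLAS doing the big mat-muls, extra ggml threads only spin-wait
        // on those calls and degrade throughput, so drop to a single thread.
        return (N >= 32 && has_blas) ? 1 : n_threads;
    }

For example, with 8 threads and BLAS enabled, a 64-token prompt now evaluates on one thread (previously all 8 would run, with the workers spin-waiting on BLAS), while single-token generation steps still use all 8 threads.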