
llama : mark LLM_ARCH_STARCODER as full offload supported (#3945)

as done in https://github.com/ggerganov/llama.cpp/pull/3827
Meng Zhang 2 years ago
parent
commit
3d48f42efc
1 changed file with 6 additions and 5 deletions:
  1. llama.cpp  +6 -5

llama.cpp  +6 -5

@@ -5164,11 +5164,12 @@ static int llama_decode_internal(
 
     // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
     const bool full_offload_supported =
-        model.arch == LLM_ARCH_LLAMA    ||
-        model.arch == LLM_ARCH_BAICHUAN ||
-        model.arch == LLM_ARCH_FALCON   ||
-        model.arch == LLM_ARCH_REFACT   ||
-        model.arch == LLM_ARCH_MPT;
+        model.arch == LLM_ARCH_LLAMA      ||
+        model.arch == LLM_ARCH_BAICHUAN   ||
+        model.arch == LLM_ARCH_FALCON     ||
+        model.arch == LLM_ARCH_REFACT     ||
+        model.arch == LLM_ARCH_MPT        ||
+        model.arch == LLM_ARCH_STARCODER;
 
     const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
     if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
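For context, the branch guarded by this check (unchanged by the commit, so its body is not shown in the diff) drops the decode to a single thread: once every layer plus the extra output tensors are on the GPU, the CPU merely drives the GPU, and additional threads only add synchronization overhead. Below is a minimal standalone sketch of that logic; pick_n_threads and its parameter list are illustrative names invented for this example, not the upstream API, and the return value of 1 reflects the upstream source around this hunk at the time.

    // Sketch of the offload gate above; names mirror the diff, but this is
    // an illustration, not the verbatim llama.cpp source.
    enum llm_arch {
        LLM_ARCH_LLAMA,
        LLM_ARCH_BAICHUAN,
        LLM_ARCH_FALCON,
        LLM_ARCH_REFACT,
        LLM_ARCH_MPT,
        LLM_ARCH_STARCODER, // newly allowed by this commit
    };

    // Returns the thread count to use for decoding. The "+ 3" accounts for
    // the non-repeating tensors (e.g. output and norm) offloaded on top of
    // the n_layer repeating layers.
    static int pick_n_threads(llm_arch arch, int n_gpu_layers, int n_layer,
                              int n_threads, bool has_cublas) {
        const bool full_offload_supported =
            arch == LLM_ARCH_LLAMA      ||
            arch == LLM_ARCH_BAICHUAN   ||
            arch == LLM_ARCH_FALCON     ||
            arch == LLM_ARCH_REFACT     ||
            arch == LLM_ARCH_MPT        ||
            arch == LLM_ARCH_STARCODER;

        const bool fully_offloaded = n_gpu_layers >= n_layer + 3;

        if (has_cublas && full_offload_supported && fully_offloaded) {
            return 1; // GPU does all the work; one CPU thread suffices
        }
        return n_threads; // partial offload: keep the user's thread count
    }

Note the design choice: the allow-list is per-architecture rather than a blanket rule, because an architecture only qualifies once all of its ops actually run on the GPU; this commit asserts that for LLM_ARCH_STARCODER, just as PR #3827 did for MPT.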