
tests : fix test-thread-safety when compiling with multiple backends (#16699)

* run one test per backend/device (even if it's the same device)
Acly, 2 months ago
Parent commit
d8eaa26e4d
1 changed file with 6 additions and 4 deletions

tests/test-thread-safety.cpp  +6 −4

@@ -3,6 +3,7 @@
 // - Creates n_parallel (--parallel) contexts per model
 // - Runs inference in parallel on each context
 
+#include <array>
 #include <thread>
 #include <vector>
 #include <atomic>
@@ -38,13 +39,14 @@ int main(int argc, char ** argv) {
     cparams.n_seq_max = 1;
 
     int dev_count = ggml_backend_dev_count();
-    int gpu_dev_count = 0;
+    std::vector<std::array<ggml_backend_dev_t, 2>> gpus;
     for (int i = 0; i < dev_count; ++i) {
         auto * dev = ggml_backend_dev_get(i);
         if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
-            gpu_dev_count++;
+            gpus.push_back({dev, nullptr});
         }
     }
+    const int gpu_dev_count = (int)gpus.size();
     const int num_models = gpu_dev_count + 1 + 1; // GPUs + 1 CPU model + 1 layer split
     //const int num_models = std::max(1, gpu_dev_count);
     const int num_contexts = std::max(1, params.n_parallel);
@@ -58,12 +60,12 @@ int main(int argc, char ** argv) {
 
         if (m < gpu_dev_count) {
             mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
-            mparams.main_gpu = m;
+            mparams.devices = gpus[m].data();
         } else if (m == gpu_dev_count) {
             mparams.split_mode = LLAMA_SPLIT_MODE_NONE;
             mparams.main_gpu = -1; // CPU model
         } else {
-            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;;
+            mparams.split_mode = LLAMA_SPLIT_MODE_LAYER;
         }
 
         llama_model * model = llama_model_load_from_file(params.model.path.c_str(), mparams);
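
For context: llama_model_params.devices expects a NULL-terminated array of devices, which is why each GPU is stored as a two-element array {dev, nullptr} rather than a bare pointer, and why the vector must outlive the model loads. Below is a minimal sketch of that pattern, assuming llama.cpp's public ggml-backend.h API as used in the diff; the helper name collect_gpus is illustrative and not part of the test.

#include <array>
#include <vector>
#include "ggml-backend.h"

// Collect every enumerated GPU device, each wrapped as its own
// NULL-terminated single-device list suitable for assignment to
// llama_model_params.devices.
static std::vector<std::array<ggml_backend_dev_t, 2>> collect_gpus() {
    std::vector<std::array<ggml_backend_dev_t, 2>> gpus;
    for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
        ggml_backend_dev_t dev = ggml_backend_dev_get(i);
        if (dev && ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_GPU) {
            gpus.push_back({dev, nullptr}); // device + NULL terminator
        }
    }
    return gpus;
}

Compared with the old main_gpu = m indexing, an explicit device list (mparams.devices = gpus[m].data()) pins model m to one specific enumerated device, so when multiple backends are compiled in and each exposes the same physical GPU, every backend/device pair still gets its own test run, per the commit message.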