llama : allow using mmap without PrefetchVirtualMemory, apply GGML_WIN_VER to llama.cpp sources (#14013)

Diego Devesa, 7 months ago
commit 3a077146a4
4 changed files with 7 additions and 3 deletions
  1. CMakeLists.txt (+5 -0)
  2. ggml/CMakeLists.txt (+1 -1)
  3. ggml/src/CMakeLists.txt (+0 -1)
  4. src/llama-mmap.cpp (+1 -1)

+ 5 - 0
CMakeLists.txt

@@ -159,6 +159,11 @@ if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
     # ... otherwise assume ggml is added by a parent CMakeLists.txt
 endif()
 
+if (MINGW)
+    # Target Windows 8 for PrefetchVirtualMemory
+    add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
+endif()
+
 #
 # build the library
 #
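
The new block mirrors the one that ggml/src/CMakeLists.txt already had: it forwards GGML_WIN_VER (0x602, i.e. Windows 8) to the compiler as _WIN32_WINNT, so the Win32 headers seen by the llama.cpp sources also declare PrefetchVirtualMemory. As a quick sanity check that the definition reaches a given translation unit, a compile-time probe along these lines can be used (a sketch, not part of this commit):

    // Sketch only, not part of the patch: compilation fails if _WIN32_WINNT
    // resolves to a value below 0x0602, i.e. the GGML_WIN_VER forwarding
    // (or the toolchain default) does not target Windows 8 or newer.
    #include <windows.h>
    #if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602
    #error "_WIN32_WINNT < 0x0602: PrefetchVirtualMemory will not be declared"
    #endif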

+ 1 - 1
ggml/CMakeLists.txt

@@ -137,7 +137,7 @@ set(GGML_CPU_ARM_ARCH        "" CACHE STRING "ggml: CPU architecture for ARM")
 set(GGML_CPU_POWERPC_CPUTYPE "" CACHE STRING "ggml: CPU type for PowerPC")
 
 
-if (WIN32)
+if (MINGW)
     set(GGML_WIN_VER "0x602" CACHE STRING   "ggml: Windows version")
 endif()
 

+ 0 - 1
ggml/src/CMakeLists.txt

@@ -125,7 +125,6 @@ if (NOT MSVC)
 endif()
 
 if (MINGW)
-    # Target Windows 8 for PrefetchVirtualMemory
     add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
 endif()
 

+ 1 - 1
src/llama-mmap.cpp

@@ -401,7 +401,7 @@ struct llama_mmap::impl {
                 }
             }
 #else
-            throw std::runtime_error("PrefetchVirtualMemory unavailable");
+            LLAMA_LOG_DEBUG("skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
 #endif
         }
     }
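
For context, the guard relaxed in this hunk follows the usual pattern for optional Win32 APIs: PrefetchVirtualMemory and WIN32_MEMORY_RANGE_ENTRY are only declared when targeting Windows 8 (_WIN32_WINNT >= 0x602), and with this change a build targeting an older version logs and skips the prefetch instead of throwing, so the mmap path keeps working. A minimal sketch of that pattern (simplified; addr/len and fprintf stand in for the real mapping members and the LLAMA_LOG_DEBUG macro used in llama-mmap.cpp):

    // Sketch of the guarded-prefetch pattern, not the actual llama_mmap code.
    #include <windows.h>
    #include <cstdio>

    static void prefetch_mapping(void * addr, SIZE_T len) {
    #if _WIN32_WINNT >= 0x602
        // Windows 8+: hint the kernel to page the mapping in ahead of use.
        WIN32_MEMORY_RANGE_ENTRY range;
        range.VirtualAddress = addr;
        range.NumberOfBytes  = len;
        if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
            fprintf(stderr, "PrefetchVirtualMemory failed (%lu)\n", GetLastError());
        }
    #else
        // Older targets: skip the prefetch instead of throwing, so mmap still works.
        (void) addr; (void) len;
        fprintf(stderr, "skipping PrefetchVirtualMemory because _WIN32_WINNT < 0x602\n");
    #endif
    }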