Browse source

enable CPU HBM (#2603)

* add cpu hbm support

* add memalign 0 byte check

* Update ggml.c

* Update llama.cpp

* ggml : allow ggml_init with 0 size

* retrigger ci

* fix code style

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Kunshang Ji 2 years ago
Parent
Commit
7f412dab9c
3 changed files with 38 additions and 2 deletions
  1. CMakeLists.txt  +8 -0
  2. ggml.c  +19 -1
  3. llama.cpp  +11 -1

CMakeLists.txt  +8 -0

@@ -557,6 +557,11 @@ endif()
 
 # ggml
 
+if (GGML_USE_CPU_HBM)
+    add_definitions(-DGGML_USE_CPU_HBM)
+    find_library(memkind memkind REQUIRED)
+endif()
+
 add_library(ggml OBJECT
             ggml.c
             ggml.h
@@ -572,6 +577,9 @@ add_library(ggml OBJECT
 target_include_directories(ggml PUBLIC . ${LLAMA_EXTRA_INCLUDES})
 target_compile_features(ggml PUBLIC c_std_11) # don't bump
 target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})
+if (GGML_USE_CPU_HBM)
+    target_link_libraries(ggml PUBLIC memkind)
+endif()
 
 add_library(ggml_static STATIC $<TARGET_OBJECTS:ggml>)
 if (BUILD_SHARED_LIBS)
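
This block only takes effect when the build is configured with -DGGML_USE_CPU_HBM=ON, and it requires the memkind library to be installed (find_library(... REQUIRED) fails the configure step otherwise). As a quick way to check whether a machine actually exposes high-bandwidth memory before turning the flag on, a minimal sketch using memkind's hbw_check_available() (illustrative only, not part of this commit) could look like:

    // probe_hbm.c -- illustrative sketch, not part of this commit.
    // Build (assuming memkind is installed): cc probe_hbm.c -lmemkind
    // hbw_check_available() comes from memkind's hbwmalloc.h and returns 0
    // when high-bandwidth memory nodes are reachable on this system.
    #include <stdio.h>
    #include <hbwmalloc.h>

    int main(void) {
        if (hbw_check_available() == 0) {
            printf("HBM reachable: building with -DGGML_USE_CPU_HBM=ON should take effect\n");
        } else {
            printf("no HBM detected: hbw_* allocations fall back to regular memory by default\n");
        }
        return 0;
    }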

ggml.c  +19 -1

@@ -103,6 +103,9 @@ typedef void * thread_ret_t;
 #include <sys/stat.h>
 #include <unistd.h>
 
+#endif
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
 #endif
 
 // __FMA__ and __F16C__ are not defined in MSVC, however they are implied with AVX2/AVX512
@@ -192,8 +195,14 @@ typedef void * thread_ret_t;
 #define GGML_ALIGNED_FREE(ptr)    _aligned_free(ptr)
 #else
 inline static void * ggml_aligned_malloc(size_t size) {
+    if (size == 0) {
+        GGML_PRINT("WARNING: Behavior may be unexpected when allocating 0 bytes for ggml_aligned_malloc!\n");
+        return NULL;
+    }
     void * aligned_memory = NULL;
-#ifdef GGML_USE_METAL
+#ifdef GGML_USE_CPU_HBM
+    int result = hbw_posix_memalign(&aligned_memory, 16, size);
+#elif GGML_USE_METAL
     int result = posix_memalign(&aligned_memory, sysconf(_SC_PAGESIZE), size);
 #else
     int result = posix_memalign(&aligned_memory, GGML_MEM_ALIGN, size);
@@ -215,8 +224,12 @@ inline static void * ggml_aligned_malloc(size_t size) {
     return aligned_memory;
 }
 #define GGML_ALIGNED_MALLOC(size) ggml_aligned_malloc(size)
+#ifdef GGML_USE_CPU_HBM
+#define GGML_ALIGNED_FREE(ptr)    if(NULL != ptr) hbw_free(ptr)
+#else
 #define GGML_ALIGNED_FREE(ptr)    free(ptr)
 #endif
+#endif
 
 #define UNUSED GGML_UNUSED
 #define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)
@@ -4566,6 +4579,11 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
         return NULL;
     }
 
+    // allow to call ggml_init with 0 size
+    if (params.mem_size == 0) {
+        params.mem_size = GGML_MEM_ALIGN;
+    }
+
     const size_t mem_size = params.mem_buffer ? params.mem_size : GGML_PAD(params.mem_size, GGML_MEM_ALIGN);
 
     *ctx = (struct ggml_context) {
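
Taken together, the ggml.c changes make the aligned allocator prefer high-bandwidth memory when GGML_USE_CPU_HBM is defined, refuse 0-byte requests with a warning, and pair HBM allocations with hbw_free. A condensed, illustrative sketch of that allocation path (not the exact ggml code; alignment hard-coded to 16 as in the HBM branch, error handling trimmed):

    // Illustrative sketch only: mirrors the dispatch added to ggml_aligned_malloc.
    // Assumes memkind is installed when GGML_USE_CPU_HBM is defined.
    #define _POSIX_C_SOURCE 200112L
    #include <stdlib.h>
    #ifdef GGML_USE_CPU_HBM
    #include <hbwmalloc.h>
    #endif

    static void * aligned_malloc_sketch(size_t size) {
        if (size == 0) {
            return NULL;                               // the patch warns and returns NULL here
        }
        void * mem = NULL;
    #ifdef GGML_USE_CPU_HBM
        int rc = hbw_posix_memalign(&mem, 16, size);   // 16-byte aligned HBM allocation
    #else
        int rc = posix_memalign(&mem, 16, size);       // regular aligned allocation
    #endif
        return rc == 0 ? mem : NULL;
    }

    static void aligned_free_sketch(void * ptr) {
        if (ptr == NULL) {
            return;
        }
    #ifdef GGML_USE_CPU_HBM
        hbw_free(ptr);                                 // HBM memory must be released with hbw_free
    #else
        free(ptr);
    #endif
    }

    int main(void) {
        void * p = aligned_malloc_sketch(1024);
        aligned_free_sketch(p);
        return 0;
    }

The same hunk set also lets ggml_init() be called with params.mem_size == 0: the size is bumped to GGML_MEM_ALIGN before the context buffer is allocated.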

llama.cpp  +11 -1

@@ -126,6 +126,9 @@ void replace_all(std::string & s, const std::string & search, const std::string
     }
     s = std::move(result);
 }
+#ifdef GGML_USE_CPU_HBM
+#include <hbwmalloc.h>
+#endif
 
 static void zeros(std::ofstream & file, size_t n) {
     char zero = 0;
@@ -450,6 +453,9 @@ static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph *
 #elif GGML_USE_METAL
 #   define llama_host_malloc(n)  ggml_metal_host_malloc(n)
 #   define llama_host_free(data) ggml_metal_host_free(data)
+#elif GGML_USE_CPU_HBM
+#   define llama_host_malloc(n)  hbw_malloc(n)
+#   define llama_host_free(data) if (data != NULL) hbw_free(data)
 #else
 #   define llama_host_malloc(n)  malloc(n)
 #   define llama_host_free(data) free(data)
@@ -1489,7 +1495,11 @@ struct llama_model_loader {
             // allocate temp buffer if not using mmap
             if (!use_mmap && cur->data == NULL) {
                 GGML_ASSERT(cur->backend != GGML_BACKEND_CPU);
-                cur->data = malloc(ggml_nbytes(cur));
+                #ifdef GGML_USE_CPU_HBM
+                cur->data = (uint8_t*)hbw_malloc(ggml_nbytes(cur));
+                #else
+                cur->data = (uint8_t*)malloc(ggml_nbytes(cur));
+                #endif
             }
 
             load_data_for(cur);
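
In llama.cpp the host-buffer macros follow the same backend selection: with GGML_USE_CPU_HBM set, llama_host_malloc/llama_host_free map to hbw_malloc/hbw_free, and the non-mmap temporary tensor buffer is taken from HBM as well. A small, self-contained usage sketch of that pairing (hypothetical buffer size, illustrative macro names, assumes memkind is linked):

    // Illustrative only: allocate a host buffer the way the HBM branch of
    // llama_host_malloc does, and release it with the matching hbw_free.
    #include <stdlib.h>
    #include <string.h>
    #ifdef GGML_USE_CPU_HBM
    #include <hbwmalloc.h>
    #  define host_malloc(n)  hbw_malloc(n)
    #  define host_free(p)    do { if ((p) != NULL) hbw_free(p); } while (0)
    #else
    #  define host_malloc(n)  malloc(n)
    #  define host_free(p)    free(p)
    #endif

    int main(void) {
        size_t n = 4096;                  // hypothetical buffer size
        void * buf = host_malloc(n);
        if (buf == NULL) return 1;
        memset(buf, 0, n);                // use the buffer as ordinary host memory
        host_free(buf);                   // must not mix hbw_malloc with plain free()
        return 0;
    }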