ggml-cuda.cu 6.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256
  1. #include <stdint.h>
  2. #include <stdio.h>
  3. #include <cuda_fp16.h>
  4. #include <atomic>
  5. #include "ggml-cuda.h"
  6. typedef uint16_t ggml_fp16_t;
  7. static_assert(sizeof(__half) == sizeof(ggml_fp16_t), "wrong fp16 size");
#define QK4_0 32
// q4_0 quantization block: 32 weights sharing one float scale.
// Each byte of qs packs two unsigned 4-bit values; dequantization subtracts
// a bias of 8 and multiplies by d (see dequantize_block_q4_0).
typedef struct {
    float d;                 // delta (per-block scale factor)
    uint8_t qs[QK4_0 / 2];   // nibbles / quants, two 4-bit values per byte
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding");
#define QK4_1 32
// q4_1 quantization block: 32 weights with a float scale and a float minimum.
// Dequantized value = nibble*d + m (see dequantize_block_q4_1).
typedef struct {
    float d;                 // delta (per-block scale factor)
    float m;                 // min (per-block offset)
    uint8_t qs[QK4_1 / 2];   // nibbles / quants, two 4-bit values per byte
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK4_2 16
// q4_2 quantization block: 16 weights sharing one fp16 scale.
// Same nibble layout and -8 bias as q4_0, but half the block size and the
// scale stored as __half (asserted below to match ggml_fp16_t in size).
typedef struct {
    __half d;                // delta (per-block scale factor, fp16)
    uint8_t qs[QK4_2 / 2];   // nibbles / quants, two 4-bit values per byte
} block_q4_2;
static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
#define QK4_3 16
// q4_3 quantization block: 16 weights with fp16 scale and fp16 minimum.
// Dequantized value = nibble*d + m (see dequantize_block_q4_3).
typedef struct {
    __half d;                // delta (per-block scale factor, fp16)
    __half m;                // min (per-block offset, fp16)
    uint8_t qs[QK4_3 / 2];   // nibbles / quants, two 4-bit values per byte
} block_q4_3;
static_assert(sizeof(block_q4_3) == 2 * sizeof(ggml_fp16_t) + QK4_3 / 2, "wrong q4_3 block size/padding");
#define QK8_0 32
// q8_0 quantization block: 32 signed 8-bit weights sharing one float scale.
// Dequantized value = qs[l]*d (see dequantize_block_q8_0).
typedef struct {
    float d;                 // delta (per-block scale factor)
    int8_t qs[QK8_0];        // quants, one signed byte per weight
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(float) + QK8_0, "wrong q8_0 block size/padding");
  40. static __global__ void dequantize_block_q4_0(const void * vx, float * y) {
  41. const block_q4_0 * x = (const block_q4_0 *) vx;
  42. const int i = blockIdx.x;
  43. const float d = x[i].d;
  44. const uint8_t * pp = x[i].qs;
  45. for (int l = 0; l < QK4_0; l += 2) {
  46. const uint8_t vi = pp[l/2];
  47. const int8_t vi0 = vi & 0xf;
  48. const int8_t vi1 = vi >> 4;
  49. const float v0 = (vi0 - 8)*d;
  50. const float v1 = (vi1 - 8)*d;
  51. y[i*QK4_0 + l + 0] = v0;
  52. y[i*QK4_0 + l + 1] = v1;
  53. }
  54. }
  55. static __global__ void dequantize_block_q4_1(const void * vx, float * y) {
  56. const block_q4_1 * x = (const block_q4_1 *) vx;
  57. const int i = blockIdx.x;
  58. const float d = x[i].d;
  59. const float m = x[i].m;
  60. const uint8_t * pp = x[i].qs;
  61. for (int l = 0; l < QK4_1; l += 2) {
  62. const uint8_t vi = pp[l/2];
  63. const int8_t vi0 = vi & 0xf;
  64. const int8_t vi1 = vi >> 4;
  65. const float v0 = vi0*d + m;
  66. const float v1 = vi1*d + m;
  67. y[i*QK4_1 + l + 0] = v0;
  68. y[i*QK4_1 + l + 1] = v1;
  69. }
  70. }
  71. static __global__ void dequantize_block_q4_2(const void * vx, float * y) {
  72. const block_q4_2 * x = (const block_q4_2 *) vx;
  73. const int i = blockIdx.x;
  74. const float d = x[i].d;
  75. const uint8_t * pp = x[i].qs;
  76. for (int l = 0; l < QK4_2; l += 2) {
  77. const uint8_t vi = pp[l/2];
  78. const int8_t vi0 = vi & 0xf;
  79. const int8_t vi1 = vi >> 4;
  80. const float v0 = (vi0 - 8)*d;
  81. const float v1 = (vi1 - 8)*d;
  82. y[i*QK4_2 + l + 0] = v0;
  83. y[i*QK4_2 + l + 1] = v1;
  84. }
  85. }
  86. static __global__ void dequantize_block_q4_3(const void * vx, float * y) {
  87. const block_q4_3 * x = (const block_q4_3 *) vx;
  88. const int i = blockIdx.x;
  89. const float d = x[i].d;
  90. const float m = x[i].m;
  91. const uint8_t * pp = x[i].qs;
  92. for (int l = 0; l < QK4_3; l += 2) {
  93. const uint8_t vi = pp[l/2];
  94. const int8_t vi0 = vi & 0xf;
  95. const int8_t vi1 = vi >> 4;
  96. const float v0 = vi0*d + m;
  97. const float v1 = vi1*d + m;
  98. y[i*QK4_3 + l + 0] = v0;
  99. y[i*QK4_3 + l + 1] = v1;
  100. }
  101. }
  102. static __global__ void dequantize_block_q8_0(const void * vx, float * y) {
  103. const block_q8_0 * x = (const block_q8_0 *) vx;
  104. const int i = blockIdx.x;
  105. const float d = x[i].d;
  106. const int8_t * pp = x[i].qs;
  107. for (int l = 0; l < QK8_0; l++) {
  108. const int8_t vi = pp[l];
  109. y[i*QK8_0 + l] = vi*d;
  110. }
  111. }
  112. void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  113. const int nb = k / QK4_0;
  114. dequantize_block_q4_0<<<nb, 1, 0, stream>>>(vx, y);
  115. }
  116. void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  117. const int nb = k / QK4_1;
  118. dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
  119. }
  120. void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  121. const int nb = k / QK4_2;
  122. dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
  123. }
  124. void dequantize_row_q4_3_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  125. const int nb = k / QK4_3;
  126. dequantize_block_q4_3<<<nb, 1, 0, stream>>>(vx, y);
  127. }
  128. void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  129. const int nb = k / QK8_0;
  130. dequantize_block_q8_0<<<nb, 1, 0, stream>>>(vx, y);
  131. }
// buffer pool for cuda
// maximum number of freed device buffers kept around for reuse at any time
#define MAX_CUDA_BUFFERS 16
  134. struct scoped_spin_lock {
  135. std::atomic_flag& lock;
  136. scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
  137. while (lock.test_and_set(std::memory_order_acquire)) {
  138. ; // spin
  139. }
  140. }
  141. ~scoped_spin_lock() {
  142. lock.clear(std::memory_order_release);
  143. }
  144. scoped_spin_lock(const scoped_spin_lock&) = delete;
  145. scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
  146. };
// one pooled device allocation: the pointer and the size it was created with
struct cuda_buffer {
    void * ptr = nullptr;   // device pointer; nullptr marks a free slot
    size_t size = 0;        // usable size of the allocation in bytes
};
// fixed-size pool of reusable device buffers, guarded by g_cuda_pool_lock
static cuda_buffer g_cuda_buffer_pool[MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
  153. void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
  154. scoped_spin_lock lock(g_cuda_pool_lock);
  155. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  156. cuda_buffer& b = g_cuda_buffer_pool[i];
  157. if (b.size >= size && b.ptr != nullptr) {
  158. void * ptr = b.ptr;
  159. *actual_size = b.size;
  160. b.ptr = nullptr;
  161. b.size = 0;
  162. return ptr;
  163. }
  164. }
  165. void * ptr;
  166. CUDA_CHECK(cudaMalloc((void **) &ptr, size));
  167. *actual_size = size;
  168. return ptr;
  169. }
  170. void ggml_cuda_pool_free(void * ptr, size_t size) {
  171. scoped_spin_lock lock(g_cuda_pool_lock);
  172. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  173. cuda_buffer& b = g_cuda_buffer_pool[i];
  174. if (b.ptr == nullptr) {
  175. b.ptr = ptr;
  176. b.size = size;
  177. return;
  178. }
  179. }
  180. fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
  181. CUDA_CHECK(cudaFree(ptr));
  182. }
// global cuBLAS handle and the stream its work is submitted on;
// both are created lazily by ggml_init_cublas()
cublasHandle_t g_cublasH = NULL;
cudaStream_t g_cudaStream = NULL;
  185. void ggml_init_cublas(void) {
  186. if (g_cublasH == NULL) {
  187. // create cublas handle, bind a stream
  188. CUBLAS_CHECK(cublasCreate(&g_cublasH));
  189. CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStream, cudaStreamNonBlocking));
  190. CUBLAS_CHECK(cublasSetStream(g_cublasH, g_cudaStream));
  191. // configure logging to stdout
  192. // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
  193. }
  194. }