// ggml-cuda.cu — CUDA dequantization kernels and device buffer pool for ggml
  1. #include <stdint.h>
  2. #include <stdio.h>
  3. #include <cuda_fp16.h>
  4. #include <atomic>
  5. #include "ggml-cuda.h"
// ggml stores fp16 values as raw 16-bit words; they are reinterpreted as
// CUDA __half on the device, so the two types must have identical size.
typedef uint16_t ggml_fp16_t;
static_assert(sizeof(__half) == sizeof(ggml_fp16_t), "wrong fp16 size");
#define QK4_0 32
// q4_0 block: QK4_0 4-bit quants sharing a single float scale.
// Dequantized value = (nibble - 8) * d (see dequantize_block_q4_0).
typedef struct {
float d; // delta (per-block scale factor)
uint8_t qs[QK4_0 / 2]; // nibbles / quants (two 4-bit values per byte)
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding");
#define QK4_1 32
// q4_1 block: QK4_1 4-bit quants with a per-block scale and minimum.
// Dequantized value = nibble * d + m (see dequantize_block_q4_1).
typedef struct {
float d; // delta (per-block scale factor)
float m; // min (per-block offset)
uint8_t qs[QK4_1 / 2]; // nibbles / quants (two 4-bit values per byte)
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");
#define QK4_2 16
// q4_2 block: QK4_2 4-bit quants with an fp16 scale (half the block size of q4_0).
// Dequantized value = (nibble - 8) * d (see dequantize_block_q4_2).
typedef struct {
__half d; // delta (per-block scale factor, stored as fp16)
uint8_t qs[QK4_2 / 2]; // nibbles / quants (two 4-bit values per byte)
} block_q4_2;
static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");
#define QK5_0 32
// q5_0 block: QK5_0 5-bit quants; the low 4 bits live in qs, the 5th bits
// of all quants are packed into the 4-byte qh bitfield.
// Dequantized value = (quant - 16) * d (see dequantize_block_q5_0).
typedef struct {
__half d; // delta (per-block scale factor, stored as fp16)
uint8_t qh[4]; // 5-th bit of quants (one bit per element, byte array to avoid padding)
uint8_t qs[QK5_0 / 2]; // nibbles / quants (two 4-bit values per byte)
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
#define QK5_1 32
// q5_1 block: QK5_1 5-bit quants with fp16 scale and minimum; the 5th bits
// of all quants are packed into qh.
// Dequantized value = quant * d + m (see dequantize_block_q5_1).
typedef struct {
__half d; // delta (per-block scale factor, stored as fp16)
__half m; // min (per-block offset, stored as fp16)
uint32_t qh; // 5-th bit of quants (one bit per element)
uint8_t qs[QK5_1 / 2]; // nibbles / quants (two 4-bit values per byte)
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");
#define QK8_0 32
// q8_0 block: QK8_0 signed 8-bit quants sharing a single float scale.
// Dequantized value = quant * d (see dequantize_block_q8_0).
typedef struct {
float d; // delta (per-block scale factor)
int8_t qs[QK8_0]; // quants (one signed byte per element)
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(float) + QK8_0, "wrong q8_0 block size/padding");
  48. static __global__ void dequantize_block_q4_0(const void * vx, float * y) {
  49. const block_q4_0 * x = (const block_q4_0 *) vx;
  50. const int i = blockIdx.x;
  51. const float d = x[i].d;
  52. const uint8_t * pp = x[i].qs;
  53. for (int l = 0; l < QK4_0; l += 2) {
  54. const uint8_t vi = pp[l/2];
  55. const int8_t vi0 = vi & 0xf;
  56. const int8_t vi1 = vi >> 4;
  57. const float v0 = (vi0 - 8)*d;
  58. const float v1 = (vi1 - 8)*d;
  59. y[i*QK4_0 + l + 0] = v0;
  60. y[i*QK4_0 + l + 1] = v1;
  61. }
  62. }
  63. static __global__ void dequantize_block_q4_1(const void * vx, float * y) {
  64. const block_q4_1 * x = (const block_q4_1 *) vx;
  65. const int i = blockIdx.x;
  66. const float d = x[i].d;
  67. const float m = x[i].m;
  68. const uint8_t * pp = x[i].qs;
  69. for (int l = 0; l < QK4_1; l += 2) {
  70. const uint8_t vi = pp[l/2];
  71. const int8_t vi0 = vi & 0xf;
  72. const int8_t vi1 = vi >> 4;
  73. const float v0 = vi0*d + m;
  74. const float v1 = vi1*d + m;
  75. y[i*QK4_1 + l + 0] = v0;
  76. y[i*QK4_1 + l + 1] = v1;
  77. }
  78. }
  79. static __global__ void dequantize_block_q4_2(const void * vx, float * y) {
  80. const block_q4_2 * x = (const block_q4_2 *) vx;
  81. const int i = blockIdx.x;
  82. const float d = x[i].d;
  83. const uint8_t * pp = x[i].qs;
  84. for (int l = 0; l < QK4_2; l += 2) {
  85. const uint8_t vi = pp[l/2];
  86. const int8_t vi0 = vi & 0xf;
  87. const int8_t vi1 = vi >> 4;
  88. const float v0 = (vi0 - 8)*d;
  89. const float v1 = (vi1 - 8)*d;
  90. y[i*QK4_2 + l + 0] = v0;
  91. y[i*QK4_2 + l + 1] = v1;
  92. }
  93. }
  94. static __global__ void dequantize_block_q5_0(const void * vx, float * y) {
  95. const block_q5_0 * x = (const block_q5_0 *) vx;
  96. const int i = blockIdx.x;
  97. const float d = x[i].d;
  98. const uint8_t * pp = x[i].qs;
  99. uint32_t qh;
  100. memcpy(&qh, x[i].qh, sizeof(qh));
  101. for (int l = 0; l < QK5_0; l += 2) {
  102. const uint8_t vi = pp[l/2];
  103. const int8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
  104. const int8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
  105. const int8_t vi0 = ((vi & 0xf) | vh0);
  106. const int8_t vi1 = ((vi >> 4) | vh1);
  107. const float v0 = (vi0 - 16)*d;
  108. const float v1 = (vi1 - 16)*d;
  109. y[i*QK5_0 + l + 0] = v0;
  110. y[i*QK5_0 + l + 1] = v1;
  111. }
  112. }
  113. static __global__ void dequantize_block_q5_1(const void * vx, float * y) {
  114. const block_q5_1 * x = (const block_q5_1 *) vx;
  115. const int i = blockIdx.x;
  116. const float d = x[i].d;
  117. const float m = x[i].m;
  118. const uint8_t * pp = x[i].qs;
  119. const uint32_t qh = x[i].qh;
  120. for (int l = 0; l < QK5_1; l += 2) {
  121. const uint8_t vi = pp[l/2];
  122. const int8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
  123. const int8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;
  124. const int8_t vi0 = (vi & 0xf) | vh0;
  125. const int8_t vi1 = (vi >> 4) | vh1;
  126. const float v0 = vi0*d + m;
  127. const float v1 = vi1*d + m;
  128. y[i*QK5_1 + l + 0] = v0;
  129. y[i*QK5_1 + l + 1] = v1;
  130. }
  131. }
  132. static __global__ void dequantize_block_q8_0(const void * vx, float * y) {
  133. const block_q8_0 * x = (const block_q8_0 *) vx;
  134. const int i = blockIdx.x;
  135. const float d = x[i].d;
  136. const int8_t * pp = x[i].qs;
  137. for (int l = 0; l < QK8_0; l++) {
  138. const int8_t vi = pp[l];
  139. y[i*QK8_0 + l] = vi*d;
  140. }
  141. }
  142. void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  143. const int nb = k / QK4_0;
  144. dequantize_block_q4_0<<<nb, 1, 0, stream>>>(vx, y);
  145. }
  146. void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  147. const int nb = k / QK4_1;
  148. dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
  149. }
  150. void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  151. const int nb = k / QK4_2;
  152. dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
  153. }
  154. void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  155. const int nb = k / QK5_0;
  156. dequantize_block_q5_0<<<nb, 1, 0, stream>>>(vx, y);
  157. }
  158. void dequantize_row_q5_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  159. const int nb = k / QK5_1;
  160. dequantize_block_q5_1<<<nb, 1, 0, stream>>>(vx, y);
  161. }
  162. void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
  163. const int nb = k / QK8_0;
  164. dequantize_block_q8_0<<<nb, 1, 0, stream>>>(vx, y);
  165. }
  166. // buffer pool for cuda
  167. #define MAX_CUDA_BUFFERS 16
  168. struct scoped_spin_lock {
  169. std::atomic_flag& lock;
  170. scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
  171. while (lock.test_and_set(std::memory_order_acquire)) {
  172. ; // spin
  173. }
  174. }
  175. ~scoped_spin_lock() {
  176. lock.clear(std::memory_order_release);
  177. }
  178. scoped_spin_lock(const scoped_spin_lock&) = delete;
  179. scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
  180. };
// One slot of the device-buffer pool; ptr == nullptr marks a free slot.
struct cuda_buffer {
void * ptr = nullptr; // device pointer from cudaMalloc, or nullptr
size_t size = 0;      // allocation size in bytes
};
// Pool state; all accesses are serialized via g_cuda_pool_lock.
static cuda_buffer g_cuda_buffer_pool[MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
  187. void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
  188. scoped_spin_lock lock(g_cuda_pool_lock);
  189. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  190. cuda_buffer& b = g_cuda_buffer_pool[i];
  191. if (b.size >= size && b.ptr != nullptr) {
  192. void * ptr = b.ptr;
  193. *actual_size = b.size;
  194. b.ptr = nullptr;
  195. b.size = 0;
  196. return ptr;
  197. }
  198. }
  199. void * ptr;
  200. CUDA_CHECK(cudaMalloc((void **) &ptr, size));
  201. *actual_size = size;
  202. return ptr;
  203. }
  204. void ggml_cuda_pool_free(void * ptr, size_t size) {
  205. scoped_spin_lock lock(g_cuda_pool_lock);
  206. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  207. cuda_buffer& b = g_cuda_buffer_pool[i];
  208. if (b.ptr == nullptr) {
  209. b.ptr = ptr;
  210. b.size = size;
  211. return;
  212. }
  213. }
  214. fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
  215. CUDA_CHECK(cudaFree(ptr));
  216. }
// Global cuBLAS handle and the stream it is bound to; both are created
// lazily by ggml_init_cublas() and stay NULL until then.
cublasHandle_t g_cublasH = NULL;
cudaStream_t g_cudaStream = NULL;
  219. void ggml_init_cublas(void) {
  220. if (g_cublasH == NULL) {
  221. // create cublas handle, bind a stream
  222. CUBLAS_CHECK(cublasCreate(&g_cublasH));
  223. CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStream, cudaStreamNonBlocking));
  224. CUBLAS_CHECK(cublasSetStream(g_cublasH, g_cudaStream));
  225. // configure logging to stdout
  226. // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
  227. }
  228. }