// ggml-cuda.cu

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h> // getenv
#include <string.h> // memcpy
#include <cuda_fp16.h>
#include <atomic>
#include "ggml-cuda.h" // the ggml types and the CUDA_CHECK/CUBLAS_CHECK macros used below are expected to come from this header

typedef uint16_t ggml_fp16_t;
static_assert(sizeof(__half) == sizeof(ggml_fp16_t), "wrong fp16 size");

#define QK4_0 32
typedef struct {
    float d;               // delta
    uint8_t qs[QK4_0 / 2]; // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(float) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
typedef struct {
    float d;               // delta
    float m;               // min
    uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(float) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");

#define QK4_2 16
typedef struct {
    __half d;              // delta
    uint8_t qs[QK4_2 / 2]; // nibbles / quants
} block_q4_2;
static_assert(sizeof(block_q4_2) == sizeof(ggml_fp16_t) + QK4_2 / 2, "wrong q4_2 block size/padding");

#define QK5_0 32
typedef struct {
    __half d;              // delta
    uint8_t qh[4];         // 5-th bit of quants
    uint8_t qs[QK5_0 / 2]; // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");

#define QK5_1 32
typedef struct {
    __half d;              // delta
    __half m;              // min
    uint32_t qh;           // 5-th bit of quants
    uint8_t qs[QK5_1 / 2]; // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");

#define QK8_0 32
typedef struct {
    float d;          // delta
    int8_t qs[QK8_0]; // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(float) + QK8_0, "wrong q8_0 block size/padding");

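// Size check, for orientation: a block_q4_0 stores 32 weights in
// sizeof(float) + 16 = 20 bytes, i.e. 5 bits per weight on average.
// Decoding example (worked by hand from the q4_0 formula below, not taken
// from the original source): for a packed byte 0xA3 with d = 0.5f,
//   low nibble  = 0x3 = 3  ->  (3 - 8) * 0.5f = -2.5f
//   high nibble = 0xA = 10 -> (10 - 8) * 0.5f =  1.0f
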
static __global__ void dequantize_block_q4_0(const void * vx, float * y) {
    const block_q4_0 * x = (const block_q4_0 *) vx;

    // one CUDA block per quantized block
    const int i = blockIdx.x;

    const float d = x[i].d;

    const uint8_t * pp = x[i].qs;

    for (int l = 0; l < QK4_0; l += 2) {
        // each byte packs two 4-bit quants
        const uint8_t vi = pp[l/2];

        const int8_t vi0 = vi & 0xf;
        const int8_t vi1 = vi >> 4;

        const float v0 = (vi0 - 8)*d;
        const float v1 = (vi1 - 8)*d;

        y[i*QK4_0 + l + 0] = v0;
        y[i*QK4_0 + l + 1] = v1;
    }
}

static __global__ void dequantize_block_q4_1(const void * vx, float * y) {
    const block_q4_1 * x = (const block_q4_1 *) vx;

    const int i = blockIdx.x;

    const float d = x[i].d;
    const float m = x[i].m;

    const uint8_t * pp = x[i].qs;

    for (int l = 0; l < QK4_1; l += 2) {
        const uint8_t vi = pp[l/2];

        const int8_t vi0 = vi & 0xf;
        const int8_t vi1 = vi >> 4;

        const float v0 = vi0*d + m;
        const float v1 = vi1*d + m;

        y[i*QK4_1 + l + 0] = v0;
        y[i*QK4_1 + l + 1] = v1;
    }
}

static __global__ void dequantize_block_q4_2(const void * vx, float * y) {
    const block_q4_2 * x = (const block_q4_2 *) vx;

    const int i = blockIdx.x;

    const float d = x[i].d;

    const uint8_t * pp = x[i].qs;

    for (int l = 0; l < QK4_2; l += 2) {
        const uint8_t vi = pp[l/2];

        const int8_t vi0 = vi & 0xf;
        const int8_t vi1 = vi >> 4;

        const float v0 = (vi0 - 8)*d;
        const float v1 = (vi1 - 8)*d;

        y[i*QK4_2 + l + 0] = v0;
        y[i*QK4_2 + l + 1] = v1;
    }
}

static __global__ void dequantize_block_q5_0(const void * vx, float * y) {
    const block_q5_0 * x = (const block_q5_0 *) vx;

    const int i = blockIdx.x;

    const float d = x[i].d;

    const uint8_t * pp = x[i].qs;

    // qh sits at a 2-byte offset in the block; memcpy avoids an unaligned 32-bit load
    uint32_t qh;
    memcpy(&qh, x[i].qh, sizeof(qh));

    for (int l = 0; l < QK5_0; l += 2) {
        const uint8_t vi = pp[l/2];

        // bit l of qh is the 5th bit of quant l; move it to bit position 4
        const int8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
        const int8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;

        const int8_t vi0 = ((vi & 0xf) | vh0);
        const int8_t vi1 = ((vi >> 4) | vh1);

        const float v0 = (vi0 - 16)*d;
        const float v1 = (vi1 - 16)*d;

        y[i*QK5_0 + l + 0] = v0;
        y[i*QK5_0 + l + 1] = v1;
    }
}

static __global__ void dequantize_block_q5_1(const void * vx, float * y) {
    const block_q5_1 * x = (const block_q5_1 *) vx;

    const int i = blockIdx.x;

    const float d = x[i].d;
    const float m = x[i].m;

    const uint8_t * pp = x[i].qs;

    const uint32_t qh = x[i].qh;

    for (int l = 0; l < QK5_1; l += 2) {
        const uint8_t vi = pp[l/2];

        const int8_t vh0 = ((qh & (1 << (l + 0))) >> (l + 0)) << 4;
        const int8_t vh1 = ((qh & (1 << (l + 1))) >> (l + 1)) << 4;

        const int8_t vi0 = (vi & 0xf) | vh0;
        const int8_t vi1 = (vi >> 4) | vh1;

        const float v0 = vi0*d + m;
        const float v1 = vi1*d + m;

        y[i*QK5_1 + l + 0] = v0;
        y[i*QK5_1 + l + 1] = v1;
    }
}

static __global__ void dequantize_block_q8_0(const void * vx, float * y) {
    const block_q8_0 * x = (const block_q8_0 *) vx;

    const int i = blockIdx.x;

    const float d = x[i].d;

    const int8_t * pp = x[i].qs;

    for (int l = 0; l < QK8_0; l++) {
        const int8_t vi = pp[l];

        y[i*QK8_0 + l] = vi*d;
    }
}

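// Launch configuration note: each launcher below starts nb thread blocks of a
// single thread, one per quantized block, so every thread dequantizes its
// block serially. k is assumed to be a multiple of the block size, which holds
// for ggml's quantized tensors.
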
void dequantize_row_q4_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK4_0;
    dequantize_block_q4_0<<<nb, 1, 0, stream>>>(vx, y);
}

void dequantize_row_q4_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK4_1;
    dequantize_block_q4_1<<<nb, 1, 0, stream>>>(vx, y);
}

void dequantize_row_q4_2_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK4_2;
    dequantize_block_q4_2<<<nb, 1, 0, stream>>>(vx, y);
}

void dequantize_row_q5_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK5_0;
    dequantize_block_q5_0<<<nb, 1, 0, stream>>>(vx, y);
}

void dequantize_row_q5_1_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK5_1;
    dequantize_block_q5_1<<<nb, 1, 0, stream>>>(vx, y);
}

void dequantize_row_q8_0_cuda(const void * vx, float * y, int k, cudaStream_t stream) {
    const int nb = k / QK8_0;
    dequantize_block_q8_0<<<nb, 1, 0, stream>>>(vx, y);
}

dequantize_row_q_cuda_t ggml_get_dequantize_row_q_cuda(ggml_type type) {
    switch (type) {
        case GGML_TYPE_Q4_0:
            return dequantize_row_q4_0_cuda;
        case GGML_TYPE_Q4_1:
            return dequantize_row_q4_1_cuda;
        case GGML_TYPE_Q4_2:
            return dequantize_row_q4_2_cuda;
        case GGML_TYPE_Q5_0:
            return dequantize_row_q5_0_cuda;
        case GGML_TYPE_Q5_1:
            return dequantize_row_q5_1_cuda;
        case GGML_TYPE_Q8_0:
            return dequantize_row_q8_0_cuda;
        default:
            return nullptr;
    }
}

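// Illustrative helper (a sketch; the helper name and the bool contract are
// assumptions, not part of the original API): dequantize k values of the given
// type from device memory d_q into d_f32 on stream, reporting whether the type
// has a CUDA dequantizer at all.
static bool ggml_cuda_dequantize_row_example(const void * d_q, float * d_f32, int k, ggml_type type, cudaStream_t stream) {
    const dequantize_row_q_cuda_t to_fp32 = ggml_get_dequantize_row_q_cuda(type);
    if (to_fp32 == nullptr) {
        return false; // unsupported quantization type
    }
    to_fp32(d_q, d_f32, k, stream);
    return true;
}
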
// buffer pool for cuda
#define MAX_CUDA_BUFFERS 16

// RAII guard: acquires the flag on construction, releases it on destruction
struct scoped_spin_lock {
    std::atomic_flag& lock;
    scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
        while (lock.test_and_set(std::memory_order_acquire)) {
            ; // spin
        }
    }
    ~scoped_spin_lock() {
        lock.clear(std::memory_order_release);
    }

    scoped_spin_lock(const scoped_spin_lock&) = delete;
    scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
};

struct cuda_buffer {
    void * ptr = nullptr;
    size_t size = 0;
};

static cuda_buffer g_cuda_buffer_pool[MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;

void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
    scoped_spin_lock lock(g_cuda_pool_lock);

    // reuse the first pooled buffer that is large enough
    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
        cuda_buffer& b = g_cuda_buffer_pool[i];
        if (b.size >= size && b.ptr != nullptr) {
            void * ptr = b.ptr;
            *actual_size = b.size;
            b.ptr = nullptr;
            b.size = 0;
            return ptr;
        }
    }

    // no suitable buffer in the pool - allocate a new one
    void * ptr;
    CUDA_CHECK(cudaMalloc((void **) &ptr, size));
    *actual_size = size;
    return ptr;
}

void ggml_cuda_pool_free(void * ptr, size_t size) {
    scoped_spin_lock lock(g_cuda_pool_lock);

    // return the buffer to the first empty pool slot
    for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
        cuda_buffer& b = g_cuda_buffer_pool[i];
        if (b.ptr == nullptr) {
            b.ptr = ptr;
            b.size = size;
            return;
        }
    }
    fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
    CUDA_CHECK(cudaFree(ptr));
}

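// Usage sketch for the pool (illustrative; everything except the two pool
// functions is assumed): the caller keeps the actual_size reported at
// allocation and passes it back on free, so the slot records the buffer's
// real capacity for later reuse.
//
//   size_t actual_size = 0;
//   void * d_tmp = ggml_cuda_pool_malloc(n * sizeof(float), &actual_size);
//   // ... launch kernels that use d_tmp ...
//   ggml_cuda_pool_free(d_tmp, actual_size);
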
cublasHandle_t g_cublasH = nullptr;
cudaStream_t g_cudaStream = nullptr;
cudaStream_t g_cudaStream2 = nullptr;
cudaEvent_t g_cudaEvent = nullptr;

void ggml_init_cublas() {
    if (g_cublasH == nullptr) {
        // create cublas handle, bind a stream
        CUBLAS_CHECK(cublasCreate(&g_cublasH));
        CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStream, cudaStreamNonBlocking));
        CUBLAS_CHECK(cublasSetStream(g_cublasH, g_cudaStream));

        // create additional stream and event for synchronization
        CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStream2, cudaStreamNonBlocking));
        CUDA_CHECK(cudaEventCreateWithFlags(&g_cudaEvent, cudaEventDisableTiming));

        // configure logging to stdout
        // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, NULL));
    }
}

cudaError_t ggml_cuda_h2d_tensor_2d(void * dst, const struct ggml_tensor * src, uint64_t i3, uint64_t i2, cudaStream_t stream) {
    const uint64_t ne0 = src->ne[0];
    const uint64_t ne1 = src->ne[1];
    const uint64_t nb0 = src->nb[0];
    const uint64_t nb1 = src->nb[1];
    const uint64_t nb2 = src->nb[2];
    const uint64_t nb3 = src->nb[3];
    const enum ggml_type type = src->type;
    const size_t ts = ggml_type_size(type);
    const size_t bs = ggml_blck_size(type);

    const void * x = (const void *) ((const char *) src->data + i2*nb2 + i3*nb3);
    if (nb0 == ts && nb1 == ts*ne0/bs) {
        // fully contiguous 2D slice - a single async copy
        return cudaMemcpyAsync(dst, x, ne1*nb1, cudaMemcpyHostToDevice, stream);
    } else if (nb0 == ts) {
        // contiguous rows with a row stride - one strided 2D copy
        return cudaMemcpy2DAsync(dst, ts*ne0/bs, x, nb1, ts*ne0/bs, ne1, cudaMemcpyHostToDevice, stream);
    } else {
        // non-contiguous elements - copy row by row
        for (uint64_t i1 = 0; i1 < ne1; i1++) {
            const void * rx = (const void *) ((const char *) x + i1*nb1);
            void * rd = (void *) ((char *) dst + i1*ts*ne0/bs);
            // pretend the row is a matrix with cols=1
            cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, cudaMemcpyHostToDevice, stream);
            if (r != cudaSuccess) return r;
        }
        return cudaSuccess;
    }
}

void * ggml_cuda_host_malloc(size_t size) {
    // pinned allocation can be disabled via the GGML_CUDA_NO_PINNED environment variable
    if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
        return nullptr;
    }

    void * ptr = nullptr;
    cudaError_t err = cudaMallocHost((void **) &ptr, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
            size/1024.0/1024.0, cudaGetErrorString(err));
        return nullptr;
    }

    return ptr;
}

void ggml_cuda_host_free(void * ptr) {
    CUDA_CHECK(cudaFreeHost(ptr));
}
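
// Usage sketch (illustrative; the fallback policy is an assumption about the
// caller, not something this file enforces): pinned memory speeds up the async
// H2D copies above, and a nullptr result should be treated as "fall back to
// pageable memory", not as a fatal error.
//
//   void * buf = ggml_cuda_host_malloc(n_bytes);
//   const bool pinned = buf != nullptr;
//   if (!pinned) {
//       buf = malloc(n_bytes);
//   }
//   // ... fill buf and copy with ggml_cuda_h2d_tensor_2d(...) ...
//   if (pinned) {
//       ggml_cuda_host_free(buf);
//   } else {
//       free(buf);
//   }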