ggml-cuda.cu 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902
  1. #include <cstddef>
  2. #include <cstdint>
  3. #include <stdint.h>
  4. #include <stdio.h>
  5. #include <atomic>
  6. #include <assert.h>
  7. #include <cuda_runtime.h>
  8. #include <cublas_v2.h>
  9. #include <cuda_fp16.h>
  10. #include "ggml-cuda.h"
  11. #include "ggml.h"
// CUDA's half and ggml's fp16 must have the same size so buffers can be
// reinterpreted between the two representations.
static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");

// Abort with a diagnostic (file:line + error string) if a CUDA runtime call fails.
#define CUDA_CHECK(err)                                                               \
    do {                                                                              \
        cudaError_t err_ = (err);                                                     \
        if (err_ != cudaSuccess) {                                                    \
            fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
                cudaGetErrorString(err_));                                            \
            exit(1);                                                                  \
        }                                                                             \
    } while (0)

#if CUDART_VERSION >= 12
// cublasGetStatusString() is only available from CUDA 12 onwards, so the
// pre-12 variant below prints the numeric status only.
#define CUBLAS_CHECK(err)                                                             \
    do {                                                                              \
        cublasStatus_t err_ = (err);                                                  \
        if (err_ != CUBLAS_STATUS_SUCCESS) {                                          \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n",                       \
                    err_, __FILE__, __LINE__, cublasGetStatusString(err_));           \
            exit(1);                                                                  \
        }                                                                             \
    } while (0)
#else
#define CUBLAS_CHECK(err)                                                             \
    do {                                                                              \
        cublasStatus_t err_ = (err);                                                  \
        if (err_ != CUBLAS_STATUS_SUCCESS) {                                          \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \
            exit(1);                                                                  \
        }                                                                             \
    } while (0)
#endif // CUDART_VERSION >= 12
// Dequantizes two values from quant block ib of vx at quant index iqs into v0/v1.
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, float & v0, float & v1);
// Converts k quantized values from x into fp32 in y, launched on stream.
typedef void (*to_fp32_cuda_t)(const void * x, float * y, int k, cudaStream_t stream);
// Dot product of one quantized block (at quant index iqs) with the floats in y.
typedef void (*dot_kernel_k_t)(const void * vx, const int ib, const int iqs, const float * y, float & v);
// Host-side dispatch entry for a binary op: dst = f(src0, src1).
typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
// Per-slice CUDA op. The ddq/ddf pointers are device buffers (quantized / float);
// i01_low/i01_high presumably bound the row range processed — confirm at call sites.
typedef void (*ggml_cuda_op_t)(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i, float * src0_ddf_i,
    float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main);
// QK = number of values after dequantization
// QR = QK / number of values before dequantization

#define QK4_0 32
#define QR4_0 2
// 4-bit quantization: 32 values share one fp16 scale, two values per byte.
typedef struct {
    half    d;              // delta
    uint8_t qs[QK4_0 / 2];  // nibbles / quants
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
#define QR4_1 2
// 4-bit quantization with a per-block minimum (affine): v = q*d + m.
typedef struct {
    half    d;              // delta
    half    m;              // min
    uint8_t qs[QK4_1 / 2];  // nibbles / quants
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");

#define QK5_0 32
#define QR5_0 2
// 5-bit quantization: 4 low bits in qs, the 5th bit of each value packed in qh.
typedef struct {
    half    d;              // delta
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_0 / 2];  // nibbles / quants
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");

#define QK5_1 32
#define QR5_1 2
// 5-bit quantization with a per-block minimum (affine).
typedef struct {
    half    d;              // delta
    half    m;              // min
    uint8_t qh[4];          // 5-th bit of quants
    uint8_t qs[QK5_1 / 2];  // nibbles / quants
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");

#define QK8_0 32
#define QR8_0 1
// 8-bit quantization: one signed byte per value, one fp16 scale per block.
typedef struct {
    half   d;           // delta
    int8_t qs[QK8_0];   // quants
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
//================================= k-quants
// "K" quants use 256-value super-blocks with per-16/32-value sub-scales that
// are themselves quantized against fp16 super-block scales (d / dmin).

#define QK_K 256

// 2-bit super-block: 16 sub-blocks of 16 values; each scales[] byte packs a
// 4-bit scale (low nibble) and a 4-bit min (high nibble).
typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    half    d;               // super-block scale for quantized scales
    half    dmin;            // super-block scale for quantized mins
} block_q2_K;
static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding");

// 3-bit super-block: 2-bit low quants in qs plus a high-bit mask in hmask;
// 6-bit sub-scales packed into 12 bytes.
typedef struct {
    uint8_t hmask[QK_K/8];    // high bit of each quant
    uint8_t qs[QK_K/4];       // nibbles / quants (low 2 bits)
    uint8_t scales[3*QK_K/64]; // 6-bit scales, packed
    half    d;                // super-block scale
} block_q3_K;
static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + 11 * QK_K / 64, "wrong q3_K block size/padding");

// 4-bit super-block: 8 sub-blocks of 32 values with 6-bit scales and mins.
typedef struct {
    half d;                    // super-block scale for quantized scales
    half dmin;                 // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    uint8_t qs[QK_K/2];        // 4--bit quants
} block_q4_K;
static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding");

// 5-bit super-block: q4_K layout plus a packed high-bit array qh.
typedef struct {
    half d;                    // super-block scale for quantized scales
    half dmin;                 // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    uint8_t qh[QK_K/8];        // quants, high bit
    uint8_t qs[QK_K/2];        // quants, low 4 bits
} block_q5_K;
static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2 + QK_K/8, "wrong q5_K block size/padding");

// 6-bit super-block: 4 low bits in ql, 2 high bits in qh, signed 8-bit sub-scales.
typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales
    half    d;               // delta
} block_q6_K;
static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding");
#define WARP_SIZE 32

// thread-block sizes for the simple 1D element-wise kernels
#define CUDA_ADD_BLOCK_SIZE 256
#define CUDA_MUL_BLOCK_SIZE 256
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_ROPE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256

// dmmv = dequantize_mul_mat_vec
#ifndef GGML_CUDA_DMMV_X
#define GGML_CUDA_DMMV_X 32 // quantized x-values consumed per outer iteration (tunable at build time)
#endif
#ifndef GGML_CUDA_DMMV_Y
#define GGML_CUDA_DMMV_Y 1  // presumably rows per thread block (blockDim.y) -- confirm at launch sites
#endif
  142. static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
  143. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  144. if (i >= k) {
  145. return;
  146. }
  147. dst[i] = x[i] + y[i];
  148. }
  149. static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
  150. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  151. if (i >= kx) {
  152. return;
  153. }
  154. dst[i] = x[i] * y[i%ky];
  155. }
  156. static __global__ void silu_f32(const float * x, float * dst, const int k) {
  157. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  158. if (i >= k) {
  159. return;
  160. }
  161. dst[i] = x[i] / (1.0f + expf(-x[i]));
  162. }
  163. static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols) {
  164. const int row = blockIdx.x*blockDim.y + threadIdx.y;
  165. const int tid = threadIdx.x;
  166. const float eps = 1e-6;
  167. float tmp = 0.0f; // partial sum for thread in warp
  168. for (int i = 0; i < ncols; i += WARP_SIZE) {
  169. const int col = i + tid;
  170. const float xi = x[row*ncols + col];
  171. tmp += xi * xi;
  172. }
  173. // sum up partial sums
  174. __syncthreads();
  175. #pragma unroll
  176. for (int mask = 16; mask > 0; mask >>= 1) {
  177. tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
  178. }
  179. const float mean = tmp / ncols;
  180. const float scale = 1.0f / sqrtf(mean + eps);
  181. for (int i = 0; i < ncols; i += WARP_SIZE) {
  182. const int col = i + tid;
  183. dst[row*ncols + col] = scale * x[row*ncols + col];
  184. }
  185. }
  186. static __device__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  187. const block_q4_0 * x = (const block_q4_0 *) vx;
  188. const float d = x[ib].d;
  189. const uint8_t vui = x[ib].qs[iqs];
  190. const int8_t vi0 = vui & 0xF;
  191. const int8_t vi1 = vui >> 4;
  192. v0 = (vi0 - 8)*d;
  193. v1 = (vi1 - 8)*d;
  194. }
  195. static __device__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  196. const block_q4_1 * x = (const block_q4_1 *) vx;
  197. const float d = x[ib].d;
  198. const float m = x[ib].m;
  199. const uint8_t vui = x[ib].qs[iqs];
  200. const int8_t vi0 = vui & 0xF;
  201. const int8_t vi1 = vui >> 4;
  202. v0 = vi0*d + m;
  203. v1 = vi1*d + m;
  204. }
// Dequantize two q5_0 values: 4 low bits from qs[iqs], the 5th bit pulled from
// the packed qh word, then recentered: v = (q5 - 16)*d.
static __device__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
    const block_q5_0 * x = (const block_q5_0 *) vx;

    const float d = x[ib].d;

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh)); // qh may be unaligned; memcpy avoids UB

    // bit iqs    -> 5th bit of the low-nibble value (shifted into position 4)
    // bit iqs+16 -> 5th bit of the high-nibble value ((>>12) & 0x10 selects it)
    const uint8_t xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const uint8_t xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0) - 16;
    const int32_t x1 = ((x[ib].qs[iqs] >>  4) | xh_1) - 16;

    v0 = x0*d;
    v1 = x1*d;
}
// Dequantize two q5_1 values: same 5-bit reassembly as q5_0, but affine
// (v = q5*d + m) instead of symmetric.
static __device__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, float & v0, float & v1){
    const block_q5_1 * x = (const block_q5_1 *) vx;

    const float d = x[ib].d;
    const float m = x[ib].m;

    uint32_t qh;
    memcpy(&qh, x[ib].qh, sizeof(qh)); // qh may be unaligned; memcpy avoids UB

    // extract the 5th bit for the low- and high-nibble values
    const uint8_t xh_0 = ((qh >> (iqs +  0)) << 4) & 0x10;
    const uint8_t xh_1 = ((qh >> (iqs + 12))     ) & 0x10;

    const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0);
    const int32_t x1 = ((x[ib].qs[iqs] >>  4) | xh_1);

    v0 = x0*d + m;
    v1 = x1*d + m;
}
  230. static __device__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  231. const block_q8_0 * x = (const block_q8_0 *) vx;
  232. const float d = x[ib].d;
  233. const int8_t vi0 = x[ib].qs[iqs + 0];
  234. const int8_t vi1 = x[ib].qs[iqs + 1];
  235. v0 = vi0*d;
  236. v1 = vi1*d;
  237. }
  238. //================================== k-quants
// Dequantize one q2_K super-block per CUDA block (64 threads assumed):
// each thread expands one quant byte into 4 outputs spaced 32 apart.
static __global__ void dequantize_block_q2_K(const void * vx, float * yy) {

    const int i   = blockIdx.x;   // super-block index
    const int tid = threadIdx.x;
    const int n   = tid/32;       // which 128-value half of the super-block
    const int l   = tid - 32*n;   // 0..31 within that half
    const int is  = 8*n + l/16;   // base index into scales[]

    const block_q2_K * x = (const block_q2_K *) vx;

    const uint8_t q = x[i].qs[32*n + l]; // 4 two-bit quants packed in one byte

    float * y = yy + i*QK_K + 128*n;

    float dall = x[i].d;    // super-block scale for the 4-bit sub-scales
    float dmin = x[i].dmin; // super-block scale for the 4-bit sub-mins
    // each scales[] byte packs scale (low nibble) and min (high nibble)
    y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
    y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
    y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
    y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
}
// Partial dot product of 8 strided y-values with one q2_K block, used by
// dequantize_mul_mat_vec_k. iqs is in 0...248 in steps of 8.
static __device__ void vec_dot_q2_K(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q2_K * x = (const block_q2_K *) vx;

    // if n is 0, we want to do the lower 128, else the upper 128,
    // covering y[l+0], y[l+32], y[l+64], y[l+96] and
    //          y[l+16], y[l+48], y[l+80], y[l+112]
    int n = iqs/128;     // 0 or 1
    int r = iqs - 128*n; // 0...120 in steps of 8
    int l = r/8;         // 0...15 in steps of 1

    const float   * y = yy + 128*n + l;
    const uint8_t * q = x[ib].qs + 32*n + l;
    const uint8_t * s = x[ib].scales + 8*n;

    const float dall = x[ib].d;
    const float dmin = x[ib].dmin;

    // scales[] bytes pack a 4-bit scale (low nibble) and 4-bit min (high nibble)
    float sum = y[  0] * (dall * ((s[0] & 0xF) * ((q[ 0] >> 0) & 3)) - dmin * (s[0] >> 4))
              + y[ 32] * (dall * ((s[2] & 0xF) * ((q[ 0] >> 2) & 3)) - dmin * (s[2] >> 4))
              + y[ 64] * (dall * ((s[4] & 0xF) * ((q[ 0] >> 4) & 3)) - dmin * (s[4] >> 4))
              + y[ 96] * (dall * ((s[6] & 0xF) * ((q[ 0] >> 6) & 3)) - dmin * (s[6] >> 4))
              + y[ 16] * (dall * ((s[1] & 0xF) * ((q[16] >> 0) & 3)) - dmin * (s[1] >> 4))
              + y[ 48] * (dall * ((s[3] & 0xF) * ((q[16] >> 2) & 3)) - dmin * (s[3] >> 4))
              + y[ 80] * (dall * ((s[5] & 0xF) * ((q[16] >> 4) & 3)) - dmin * (s[5] >> 4))
              + y[112] * (dall * ((s[7] & 0xF) * ((q[16] >> 6) & 3)) - dmin * (s[7] >> 4));

    result = sum;
}
// Dequantize one q3_K super-block per CUDA block (64 threads assumed):
// each thread writes 4 consecutive outputs.
static __global__ void dequantize_block_q3_K(const void * vx, float * yy) {

    int r   = threadIdx.x/4;
    int i   = blockIdx.x;               // super-block index
    int tid = r/2;
    int is0 = r%2;
    int l0  = 16*is0 + 4*(threadIdx.x%4); // first of the 4 output positions
    int n   = tid / 4;                  // which 128-value half
    int j   = tid - 4*n;                // 32-value group within the half

    const block_q3_K * x = (const block_q3_K *) vx;

    uint8_t m  = 1 << (4*n + j);  // hmask bit for this group
    int is     = 8*n + 2*j + is0; // 6-bit scale index (0..15)
    int shift  = 2*j;             // shift selecting the 2-bit quant in qs

    // reassemble the 6-bit scale from the packed 12-byte scales[] layout:
    // low 4 bits from one byte, top 2 bits from the tail bytes
    int8_t us = is <  4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
                is <  8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
                is < 12 ? (x[i].scales[is-8] >>  4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
                          (x[i].scales[is-8] >>  4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
    float d_all = x[i].d;
    float dl    = d_all * (us - 32); // scales are stored with a +32 offset

    float * y = yy + i*QK_K + 128*n + 32*j;
    const uint8_t * q  = x[i].qs + 32*n;
    const uint8_t * hm = x[i].hmask;

    // a cleared hmask bit means the stored 2-bit value is offset by -4
    for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
}
// Partial dot product of 8 strided y-values with one q3_K block.
// iqs is in 0...248 in steps of 8.
static __device__ void vec_dot_q3_K(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q3_K * x = (const block_q3_K *) vx;

    const uint32_t kmask1 = 0x03030303; // top-2-bit lanes of the packed scales
    const uint32_t kmask2 = 0x0f0f0f0f; // low-nibble lanes

    uint32_t aux[3];  // raw 12 bytes of packed scales
    uint32_t utmp[4]; // unpacked 6-bit scales, viewed as 16 int8 below

    // if n is 0, we want to do the lower 128, else the upper 128,
    // covering y[l+0], y[l+32], y[l+64], y[l+96] and
    //          y[l+16], y[l+48], y[l+80], y[l+112]
    int n = iqs/128;     // 0 or 1
    int r = iqs - 128*n; // 0...120 in steps of 8
    int l = r/8;         // 0...15 in steps of 1

    const float   * y  = yy + 128*n + l;
    const uint8_t * q  = x[ib].qs + 32*n + l;
    const uint8_t * hm = x[ib].hmask + l;
    const int8_t  * s  = (const int8_t *)utmp + 8*n;

    // unpack all 16 six-bit scales at once using word-wide masks
    memcpy(aux, x[ib].scales, 12);
    utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
    utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
    utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
    utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);

    const float dall = x[ib].d;

    const uint8_t m = 1 << (4*n); // base hmask bit for this half

    // scales carry a +32 offset; a cleared hmask bit offsets the quant by -4
    float sum = y[  0] * (s[0] - 32) * (((q[ 0] >> 0) & 3) - (hm[ 0] & (m << 0) ? 0 : 4))
              + y[ 32] * (s[2] - 32) * (((q[ 0] >> 2) & 3) - (hm[ 0] & (m << 1) ? 0 : 4))
              + y[ 64] * (s[4] - 32) * (((q[ 0] >> 4) & 3) - (hm[ 0] & (m << 2) ? 0 : 4))
              + y[ 96] * (s[6] - 32) * (((q[ 0] >> 6) & 3) - (hm[ 0] & (m << 3) ? 0 : 4))
              + y[ 16] * (s[1] - 32) * (((q[16] >> 0) & 3) - (hm[16] & (m << 0) ? 0 : 4))
              + y[ 48] * (s[3] - 32) * (((q[16] >> 2) & 3) - (hm[16] & (m << 1) ? 0 : 4))
              + y[ 80] * (s[5] - 32) * (((q[16] >> 4) & 3) - (hm[16] & (m << 2) ? 0 : 4))
              + y[112] * (s[7] - 32) * (((q[16] >> 6) & 3) - (hm[16] & (m << 3) ? 0 : 4));

    result = sum * dall;
}
// Unpack the 6-bit scale (d) and 6-bit min (m) of sub-block j from the packed
// 12-byte scales[] array shared by q4_K and q5_K.
static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
    if (j < 4) {
        // first 4 sub-blocks: low 6 bits of bytes j (scale) and j+4 (min)
        d = q[j] & 63; m = q[j + 4] & 63;
    } else {
        // last 4: low 4 bits come from bytes 8..11, the top 2 bits are the
        // high bits of the first 8 bytes
        d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        m = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
    }
}
// Dequantize one q4_K super-block per CUDA block (32 threads assumed):
// each thread expands n=4 quant bytes into 2*n outputs, 32 apart.
static __global__ void dequantize_block_q4_K(const void * vx, float * yy) {
    const block_q4_K * x = (const block_q4_K *) vx;

    const int i = blockIdx.x; // super-block index

    //// assume 64 threads - this is very slightly better than the one below
    //const int tid = threadIdx.x;
    //const int il  = tid/16;
    //const int ir  = tid%16;
    //const int is  = 2*il;
    //const int n   = 2;

    // assume 32 threads
    const int tid = threadIdx.x;
    const int il  = tid/8;  // 64-value quarter
    const int ir  = tid%8;  // position within the quarter
    const int is  = 2*il;   // scale/min pair index
    const int n   = 4;      // bytes handled per thread

    float * y = yy + i*QK_K + 64*il + n*ir;

    const float dall = x[i].d;
    const float dmin = x[i].dmin;

    const uint8_t * q = x[i].qs + 32*il + n*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;
    // low nibble -> first 32 values, high nibble -> next 32
    for (int l = 0; l < n; ++l) {
        y[l + 0] = d1 * (q[l] & 0xF) - m1;
        y[l +32] = d2 * (q[l] >>  4) - m2;
    }
}
// Partial dot product of 8 y-values with one q4_K block.
static __device__ void vec_dot_q4_K(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q4_K * x = (const block_q4_K *) vx;

    // iqs is in 0...248 in steps of 8 =>
    const int j  = iqs / 64;        // j is in 0...3
    const int ir = (iqs - 64*j)/2;  // ir is in 0...28 in steps of 4
    const int is = 2*j;             // is is in 0...6 in steps of 2

    const float   * y = yy + 64*j + ir;
    const uint8_t * q = x[ib].qs + 32*j + ir;

    const float dall = x[ib].d;
    const float dmin = x[ib].dmin;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[ib].scales, sc, m);
    const float d1 = dall * sc;
    const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[ib].scales, sc, m);
    const float d2 = dall * sc;
    const float m2 = dmin * m;

    // 4 bytes: low nibbles pair with y[k], high nibbles with y[k+32]
    float sum = 0;
    for (int k = 0; k < 4; ++k) {
        sum += y[k +  0] * (d1 * (q[k] & 0xF) - m1);
        sum += y[k + 32] * (d2 * (q[k] >>  4) - m2);
    }
    result = sum;
}
// Dequantize one q5_K super-block per CUDA block (64 threads assumed):
// each thread expands 2 quant bytes into 4 outputs.
static __global__ void dequantize_block_q5_K(const void * vx, float * yy) {
    const block_q5_K * x = (const block_q5_K *) vx;

    const int i = blockIdx.x; // super-block index

    // assume 64 threads - this is very slightly better than the one below
    const int tid = threadIdx.x;
    const int il  = tid/16; // il is in 0...3
    const int ir  = tid%16; // ir is in 0...15
    const int is  = 2*il;   // is is in 0...6

    float * y = yy + i*QK_K + 64*il + 2*ir;

    const float dall = x[i].d;
    const float dmin = x[i].dmin;

    const uint8_t * ql = x[i].qs + 32*il + 2*ir; // low 4 bits
    const uint8_t * qh = x[i].qh + 2*ir;         // packed 5th bits

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m;

    uint8_t hm = 1 << (2*il); // qh bit selecting the 5th bit for this quarter
    y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
    y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
    hm <<= 1; // next bit covers the high-nibble half
    y[32] = d2 * ((ql[ 0] >>  4) + (qh[ 0] & hm ? 16 : 0)) - m2;
    y[33] = d2 * ((ql[ 1] >>  4) + (qh[ 1] & hm ? 16 : 0)) - m2;
}
// Partial dot product of 8 y-values with one q5_K block.
static __device__ void vec_dot_q5_K(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q5_K * x = (const block_q5_K *) vx;

    // iqs is in 0...248 in steps of 8 =>
    const int j  = iqs / 64;        // j is in 0...3
    const int ir = (iqs - 64*j)/2;  // ir is in 0...28 in steps of 4
    const int is = 2*j;             // is is in 0...6 in steps of 2

    const float   * y  = yy + 64*j + ir;
    const uint8_t * ql = x[ib].qs + 32*j + ir; // low 4 bits
    const uint8_t * qh = x[ib].qh + ir;        // packed 5th bits

    const float dall = x[ib].d;
    const float dmin = x[ib].dmin;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[ib].scales, sc, m);
    const float d1 = dall * sc;
    const float m1 = dmin * m;
    get_scale_min_k4(is + 1, x[ib].scales, sc, m);
    const float d2 = dall * sc;
    const float m2 = dmin * m;

    uint8_t hm = 1 << is; // qh bit holding the 5th bit for the low nibbles
    float sum = 0;
    for (int k = 0; k < 4; ++k) {
        sum += y[k +  0] * (d1 * ((ql[k] & 0xF) + (qh[k] & hm ? 16 : 0)) - m1);
    }
    hm <<= 1; // next bit covers the high nibbles
    for (int k = 0; k < 4; ++k) {
        sum += y[k + 32] * (d2 * ((ql[k] >>  4) + (qh[k] & hm ? 16 : 0)) - m2);
    }
    result = sum;
}
// Dequantize one q6_K super-block per CUDA block (64 threads assumed):
// each thread reassembles 4 six-bit values (4 low bits from ql, 2 high from qh).
static __global__ void dequantize_block_q6_K(const void * vx, float * yy) {
    const block_q6_K * x = (const block_q6_K *) vx;

    const int i = blockIdx.x; // super-block index

    // assume 64 threads - this is very slightly better than the one below
    const int tid = threadIdx.x;
    const int ip  = tid/32;      // ip is 0 or 1 (128-value half)
    const int il  = tid - 32*ip; // 0...32
    const int is  = 8*ip + il/16; // scale index

    float * y = yy + i*QK_K + 128*ip + il;

    const float d = x[i].d;

    const uint8_t * ql = x[i].ql + 64*ip + il;
    const uint8_t   qh = x[i].qh[32*ip + il]; // 4 two-bit high parts in one byte
    const int8_t  * sc = x[i].scales + is;

    // values are stored with a +32 offset
    y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
    y[64] = d * sc[4] * ((int8_t)((ql[ 0] >>  4) | (((qh >> 4) & 3) << 4)) - 32);
    y[96] = d * sc[6] * ((int8_t)((ql[32] >>  4) | (((qh >> 6) & 3) << 4)) - 32);
}
// Partial dot product of 8 strided y-values with one q6_K block.
// iqs is in 0...248 in steps of 8.
static __device__ void vec_dot_q6_K(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q6_K * x = (const block_q6_K *) vx;

    const int ip = iqs / 128;         // 0 or 1 (128-value half)
    const int il = (iqs - 128*ip)/8;  // 0...15
    const int is = 8*ip;              // scale base index

    const float * y = yy + 128*ip + il;

    const float d = x[ib].d;

    const uint8_t * ql = x[ib].ql + 64*ip + il; // low 4 bits
    const uint8_t * qh = x[ib].qh + 32*ip + il; // packed high 2 bits
    const int8_t  * sc = x[ib].scales + is;

    // reassemble each 6-bit value and remove the +32 storage offset
    result = y[  0] * d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh[ 0] >> 0) & 3) << 4)) - 32)
           + y[ 32] * d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh[ 0] >> 2) & 3) << 4)) - 32)
           + y[ 64] * d * sc[4] * ((int8_t)((ql[ 0] >>  4) | (((qh[ 0] >> 4) & 3) << 4)) - 32)
           + y[ 96] * d * sc[6] * ((int8_t)((ql[32] >>  4) | (((qh[ 0] >> 6) & 3) << 4)) - 32)
           + y[ 16] * d * sc[1] * ((int8_t)((ql[16] & 0xF) | (((qh[16] >> 0) & 3) << 4)) - 32)
           + y[ 48] * d * sc[3] * ((int8_t)((ql[48] & 0xF) | (((qh[16] >> 2) & 3) << 4)) - 32)
           + y[ 80] * d * sc[5] * ((int8_t)((ql[16] >>  4) | (((qh[16] >> 4) & 3) << 4)) - 32)
           + y[112] * d * sc[7] * ((int8_t)((ql[48] >>  4) | (((qh[16] >> 6) & 3) << 4)) - 32);
}
  486. static __device__ void convert_f16(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  487. const half * x = (const half *) vx;
  488. v0 = __half2float(x[ib + iqs + 0]);
  489. v1 = __half2float(x[ib + iqs + 1]);
  490. }
// Generic dequantization kernel: each thread expands one quant pair into two
// floats — adjacent when qr == 1, otherwise qk/2 apart within the output block.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_block(const void * vx, float * y, const int k) {
    // NOTE(review): the index advances by 2 per thread but only by blockDim.x
    // per block, so consecutive blocks cover overlapping ranges (duplicate but
    // identical writes) — confirm against the launch configuration, which is
    // not visible in this chunk.
    const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;

    if (i >= k) {
        return;
    }

    const int ib  = i/qk;      // block index
    const int iqs = (i%qk)/qr; // quant index
    const int iybs = i - i%qk; // y block start index
    const int y_offset = qr == 1 ? 1 : qk/2;

    // dequantize
    float & v0 = y[iybs + iqs + 0];
    float & v1 = y[iybs + iqs + y_offset];
    dequantize_kernel(vx, ib, iqs, v0, v1);
}
// Quantized matrix × float vector: one warp (blockDim.x == WARP_SIZE expected,
// given the 32-lane shuffle reduction) accumulates dst[row] = vx[row] · y.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_mul_mat_vec(const void * vx, const float * y, float * dst, const int ncols) {
    // qk = quantized weights per x block
    // qr = number of quantized weights per data value in x block
    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    const int tid = threadIdx.x;

    const int iter_stride = 2*GGML_CUDA_DMMV_X;
    const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
    const int y_offset = qr == 1 ? 1 : qk/2;

    float tmp = 0.0f; // partial sum for thread in warp

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = (row*ncols + col)/qk; // x block index
        const int iqs = (col%qk)/qr;         // x quant index
        const int iybs = col - col%qk;       // y block start index

// processing >2 values per i iter is faster for fast GPUs
#pragma unroll
        for (int j = 0; j < vals_per_iter; j += 2) {
            // process 2 vals per j iter

            // dequantize
            float v0, v1;
            dequantize_kernel(vx, ib, iqs + j/qr, v0, v1);
            // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val

            // matrix multiplication
            tmp += v0 * y[iybs + iqs + j/qr + 0];
            tmp += v1 * y[iybs + iqs + j/qr + y_offset];
            // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
        }
    }

    // sum up partial sums and write back result
    __syncthreads();
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (tid == 0) {
        dst[row] = tmp;
    }
}
// k-quant matrix × float vector: n_thread lanes split each QK_K super-block;
// the per-block dot_kernel does the unpack + multiply, and a 32-lane shuffle
// reduces the partials, so dst[row] = vx[row] · y.
template <int n_thread, dot_kernel_k_t dot_kernel>
static __global__ void dequantize_mul_mat_vec_k(const void * vx, const float * y, float * dst, const int ncols) {
    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    const int tid = threadIdx.x;

    const int iter_stride = QK_K;                 // one super-block per outer iteration
    const int vals_per_iter = iter_stride / n_thread;
    const int num_blocks_per_row = ncols / QK_K;  // assumes ncols is a multiple of QK_K
    const int ib0 = row*num_blocks_per_row;       // first block of this row

    float tmp = 0; // partial sum for thread in warp

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = ib0 + col/QK_K;  // x block index
        const int iqs = col%QK_K;       // x quant index
        const int iybs = col - col%QK_K; // y block start index

        float v;
        dot_kernel(vx, ib, iqs, y + iybs, v);
        tmp += v;
    }

    // sum up partial sums and write back result
    __syncthreads();
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (tid == 0) {
        dst[row] = tmp;
    }
}
  573. static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p, const float theta_scale) {
  574. const int col = 2*(blockDim.x*blockIdx.x + threadIdx.x);
  575. if (col >= ncols) {
  576. return;
  577. }
  578. const int row = blockDim.y*blockIdx.y + threadIdx.y;
  579. const int i = row*ncols + col;
  580. const float theta = p*powf(theta_scale, col/2);
  581. const float sin_theta = sinf(theta);
  582. const float cos_theta = cosf(theta);
  583. const float x0 = x[i + 0];
  584. const float x1 = x[i + 1];
  585. dst[i + 0] = x0*cos_theta - x1*sin_theta;
  586. dst[i + 1] = x0*sin_theta + x1*cos_theta;
  587. }
  588. static void add_f32_cuda(const float * x, const float * y, float * dst, const int k, cudaStream_t stream) {
  589. const int num_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
  590. add_f32<<<num_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, k);
  591. }
  592. static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
  593. const int num_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE;
  594. mul_f32<<<num_blocks, CUDA_MUL_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
  595. }
  596. static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
  597. const int num_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE;
  598. silu_f32<<<num_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
  599. }
  600. static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  601. GGML_ASSERT(ncols % WARP_SIZE == 0);
  602. const dim3 block_dims(WARP_SIZE, 1, 1);
  603. rms_norm_f32<<<nrows, block_dims, 0, stream>>>(x, dst, ncols);
  604. }
  605. static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  606. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  607. dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  608. }
  609. static void dequantize_row_q4_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  610. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  611. dequantize_block<QK4_1, QR4_1, dequantize_q4_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  612. }
  613. static void dequantize_row_q5_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  614. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  615. dequantize_block<QK5_0, QR5_0, dequantize_q5_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  616. }
  617. static void dequantize_row_q5_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  618. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  619. dequantize_block<QK5_1, QR5_1, dequantize_q5_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  620. }
  621. static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  622. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  623. dequantize_block<QK8_0, QR8_0, dequantize_q8_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  624. }
  625. static void dequantize_row_q2_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  626. const int nb = k / QK_K;
  627. dequantize_block_q2_K<<<nb, 64, 0, stream>>>(vx, y);
  628. }
  629. static void dequantize_row_q3_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  630. const int nb = k / QK_K;
  631. dequantize_block_q3_K<<<nb, 64, 0, stream>>>(vx, y);
  632. }
  633. static void dequantize_row_q4_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  634. const int nb = k / QK_K;
  635. dequantize_block_q4_K<<<nb, 32, 0, stream>>>(vx, y);
  636. }
  637. static void dequantize_row_q5_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  638. const int nb = k / QK_K;
  639. dequantize_block_q5_K<<<nb, 64, 0, stream>>>(vx, y);
  640. }
  641. static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  642. const int nb = k / QK_K;
  643. dequantize_block_q6_K<<<nb, 64, 0, stream>>>(vx, y);
  644. }
  645. static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  646. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  647. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  648. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  649. dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
  650. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  651. }
  652. static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  653. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  654. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  655. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  656. dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
  657. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  658. }
  659. static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  660. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  661. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  662. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  663. dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
  664. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  665. }
  666. static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  667. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  668. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  669. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  670. dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
  671. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  672. }
  673. static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  674. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  675. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  676. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  677. dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
  678. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  679. }
  680. static void dequantize_mul_mat_vec_q2_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  681. GGML_ASSERT(ncols % QK_K == 0);
  682. const int ny = 2;
  683. const dim3 block_dims(32, ny, 1);
  684. dequantize_mul_mat_vec_k<32, vec_dot_q2_K><<<(nrows + ny - 1)/ny, block_dims, 0, stream>>>(vx, y, dst, ncols);
  685. }
  686. static void dequantize_mul_mat_vec_q3_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  687. GGML_ASSERT(ncols % QK_K == 0);
  688. const dim3 block_dims(32, 2, 1);
  689. dequantize_mul_mat_vec_k<32, vec_dot_q3_K><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  690. }
  691. static void dequantize_mul_mat_vec_q4_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  692. GGML_ASSERT(ncols % QK_K == 0);
  693. const dim3 block_dims(32, 2, 1);
  694. dequantize_mul_mat_vec_k<32, vec_dot_q4_K><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  695. }
  696. static void dequantize_mul_mat_vec_q5_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  697. GGML_ASSERT(ncols % QK_K == 0);
  698. const dim3 block_dims(32, 2, 1);
  699. dequantize_mul_mat_vec_k<32, vec_dot_q5_K><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  700. }
  701. static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  702. GGML_ASSERT(ncols % QK_K == 0);
  703. const dim3 block_dims(32, 2, 1);
  704. dequantize_mul_mat_vec_k<32, vec_dot_q6_K><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  705. }
  706. static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  707. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  708. dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  709. }
  710. static void convert_mul_mat_vec_f16_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  711. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  712. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  713. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  714. dequantize_mul_mat_vec<1, 1, convert_f16>
  715. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  716. }
  717. static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
  718. switch (type) {
  719. case GGML_TYPE_Q4_0:
  720. return dequantize_row_q4_0_cuda;
  721. case GGML_TYPE_Q4_1:
  722. return dequantize_row_q4_1_cuda;
  723. case GGML_TYPE_Q5_0:
  724. return dequantize_row_q5_0_cuda;
  725. case GGML_TYPE_Q5_1:
  726. return dequantize_row_q5_1_cuda;
  727. case GGML_TYPE_Q8_0:
  728. return dequantize_row_q8_0_cuda;
  729. case GGML_TYPE_Q2_K:
  730. return dequantize_row_q2_K_cuda;
  731. case GGML_TYPE_Q3_K:
  732. return dequantize_row_q3_K_cuda;
  733. case GGML_TYPE_Q4_K:
  734. return dequantize_row_q4_K_cuda;
  735. case GGML_TYPE_Q5_K:
  736. return dequantize_row_q5_K_cuda;
  737. case GGML_TYPE_Q6_K:
  738. return dequantize_row_q6_K_cuda;
  739. case GGML_TYPE_F16:
  740. return convert_fp16_to_fp32_cuda;
  741. default:
  742. return nullptr;
  743. }
  744. }
  745. static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p, const float theta_scale, cudaStream_t stream) {
  746. GGML_ASSERT(nrows % 2 == 0);
  747. const dim3 block_dims(2*CUDA_ROPE_BLOCK_SIZE, 1, 1);
  748. const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
  749. const dim3 block_nums(num_blocks_x, nrows, 1);
  750. rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p, theta_scale);
  751. }
  752. // buffer pool for cuda
  753. #define MAX_CUDA_BUFFERS 256
// RAII guard over a std::atomic_flag spin lock: busy-waits to acquire in the
// constructor, releases in the destructor. Non-copyable. Used to serialize
// access to the CUDA buffer pool below.
struct scoped_spin_lock {
std::atomic_flag& lock;
scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
// spin until the flag transitions from clear to set (acquire ordering pairs
// with the release in the destructor)
while (lock.test_and_set(std::memory_order_acquire)) {
; // spin
}
}
~scoped_spin_lock() {
lock.clear(std::memory_order_release);
}
// copying a lock guard would double-release; forbid it
scoped_spin_lock(const scoped_spin_lock&) = delete;
scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
};
// One slot in the device buffer pool: a cached cudaMalloc'd allocation and its
// capacity in bytes. ptr == nullptr marks a free slot.
struct cuda_buffer {
void * ptr = nullptr;
size_t size = 0;
};
// per-device pool of reusable device allocations, guarded by g_cuda_pool_lock
static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS];
// spin lock serializing all pool access (see scoped_spin_lock)
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
  773. static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
  774. scoped_spin_lock lock(g_cuda_pool_lock);
  775. int id;
  776. CUDA_CHECK(cudaGetDevice(&id));
  777. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  778. cuda_buffer& b = g_cuda_buffer_pool[id][i];
  779. if (b.size >= size && b.ptr != nullptr) {
  780. void * ptr = b.ptr;
  781. *actual_size = b.size;
  782. b.ptr = nullptr;
  783. b.size = 0;
  784. return ptr;
  785. }
  786. }
  787. void * ptr;
  788. CUDA_CHECK(cudaMalloc((void **) &ptr, size));
  789. *actual_size = size;
  790. return ptr;
  791. }
  792. static void ggml_cuda_pool_free(void * ptr, size_t size) {
  793. scoped_spin_lock lock(g_cuda_pool_lock);
  794. int id;
  795. CUDA_CHECK(cudaGetDevice(&id));
  796. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  797. cuda_buffer& b = g_cuda_buffer_pool[id][i];
  798. if (b.ptr == nullptr) {
  799. b.ptr = ptr;
  800. b.size = size;
  801. return;
  802. }
  803. }
  804. fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
  805. CUDA_CHECK(cudaFree(ptr));
  806. }
// VRAM scratch buffer for intermediate tensors and its bump-allocation state
static void * g_scratch_buffer = nullptr;
static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
static size_t g_scratch_offset = 0;
#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
#define GGML_CUDA_MAX_EVENTS 64
// number of CUDA devices; -1 until ggml_init_cublas() has run
static int g_device_count = -1;
// device that gathers partial results from the other GPUs (see ggml_cuda_op_mul_mat_cublas)
static int g_main_device = 0;
// normalized cumulative row-split boundaries per device, derived from VRAM sizes
// in ggml_init_cublas() or overridden by ggml_cuda_set_tensor_split()
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
// per-device cuBLAS handles, created in ggml_init_cublas()
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
// per-device streams for compute and for src1 copies, plus events that order the two
static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
static cudaStream_t g_cudaStreams_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
static cudaEvent_t g_cudaEvents_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_EVENTS] = { nullptr };
  819. void ggml_init_cublas() {
  820. static bool initialized = false;
  821. if (!initialized) {
  822. CUDA_CHECK(cudaGetDeviceCount(&g_device_count));
  823. GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES);
  824. int64_t total_vram = 0;
  825. fprintf(stderr, "%s: found %d CUDA devices:\n", __func__, g_device_count);
  826. for (int id = 0; id < g_device_count; ++id) {
  827. cudaDeviceProp prop;
  828. CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
  829. fprintf(stderr, " Device %d: %s\n", id, prop.name);
  830. g_tensor_split[id] = total_vram;
  831. total_vram += prop.totalGlobalMem;
  832. }
  833. for (int id = 0; id < g_device_count; ++id) {
  834. g_tensor_split[id] /= total_vram;
  835. }
  836. for (int id = 0; id < g_device_count; ++id) {
  837. CUDA_CHECK(cudaSetDevice(id));
  838. // create streams
  839. for (int i = 0; i < GGML_CUDA_MAX_STREAMS; ++i) {
  840. CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id][i], cudaStreamNonBlocking));
  841. CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_memcpy_src1[id][i], cudaStreamNonBlocking));
  842. }
  843. // create events
  844. for (int i = 0; i < GGML_CUDA_MAX_EVENTS; ++i) {
  845. CUDA_CHECK(cudaEventCreateWithFlags(&g_cudaEvents_memcpy_src1[id][i], cudaEventDisableTiming));
  846. }
  847. // create cublas handle
  848. CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
  849. CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
  850. }
  851. // configure logging to stdout
  852. // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
  853. initialized = true;
  854. }
  855. }
  856. void ggml_cuda_set_tensor_split(const float * tensor_split) {
  857. bool all_zero = true;
  858. for (int i = 0; i < g_device_count; ++i) {
  859. if (tensor_split[i] != 0.0f) {
  860. all_zero = false;
  861. break;
  862. }
  863. }
  864. if (all_zero) {
  865. return;
  866. }
  867. float split_sum = 0.0f;
  868. for (int i = 0; i < g_device_count; ++i) {
  869. g_tensor_split[i] = split_sum;
  870. split_sum += tensor_split[i];
  871. }
  872. for (int i = 0; i < g_device_count; ++i) {
  873. g_tensor_split[i] /= split_sum;
  874. }
  875. }
  876. void * ggml_cuda_host_malloc(size_t size) {
  877. if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
  878. return nullptr;
  879. }
  880. void * ptr = nullptr;
  881. cudaError_t err = cudaMallocHost((void **) &ptr, size);
  882. if (err != cudaSuccess) {
  883. // The allocation error can be bypassed. A null ptr will assigned out of this function.
  884. // This can fixed the OOM error in WSL.
  885. cudaGetLastError();
  886. fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
  887. size/1024.0/1024.0, cudaGetErrorString(err));
  888. return nullptr;
  889. }
  890. return ptr;
  891. }
// Release pinned host memory obtained from ggml_cuda_host_malloc.
// Aborts via CUDA_CHECK if ptr was not allocated with cudaMallocHost.
void ggml_cuda_host_free(void * ptr) {
CUDA_CHECK(cudaFreeHost(ptr));
}
// Asynchronously copy rows [i1_low, i1_high) of the (i3, i2) slice of a host-side
// ggml tensor into a densely packed device buffer on `stream`.
// Picks the cheapest copy matching the source layout:
//   - contiguous elements AND contiguous rows -> single cudaMemcpyAsync
//   - contiguous elements, strided rows       -> single cudaMemcpy2DAsync
//   - strided elements                        -> one cudaMemcpy2DAsync per row
// Returns the first CUDA error encountered, or cudaSuccess.
// NOTE(review): assumes src->data is host-accessible — confirm at call sites.
static cudaError_t ggml_cuda_h2d_tensor_2d(
void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) {
char * dst_char = (char *) dst;
const int64_t ne0 = src->ne[0];
const int64_t nb0 = src->nb[0];
const int64_t nb1 = src->nb[1];
const int64_t nb2 = src->nb[2];
const int64_t nb3 = src->nb[3];
const enum ggml_type type = src->type;
const int64_t ts = ggml_type_size(type);  // bytes per quantization block
const int64_t bs = ggml_blck_size(type);  // elements per quantization block
int64_t i1_diff = i1_high - i1_low;
// start of the requested row range within the (i3, i2) slice
const void * x = (const void *) ((const char *) src->data + i1_low*nb1 + i2*nb2 + i3*nb3);
if (nb0 == ts && nb1 == ts*ne0/bs) {
// fully contiguous: one flat copy of i1_diff rows
return cudaMemcpyAsync(dst_char, x, i1_diff*nb1, cudaMemcpyHostToDevice, stream);
} else if (nb0 == ts) {
// rows are strided but elements within a row are contiguous: strided 2D copy,
// packing each row to ts*ne0/bs bytes on the device
return cudaMemcpy2DAsync(dst_char, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, cudaMemcpyHostToDevice, stream);
} else {
// elements are strided too: gather each row with its own pitched copy
for (int64_t i1 = 0; i1 < i1_diff; i1++) {
const void * rx = (const void *) ((const char *) x + i1*nb1);
void * rd = (void *) (dst_char + i1*ts*ne0/bs);
// pretend the row is a matrix with cols=1
cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, cudaMemcpyHostToDevice, stream);
if (r != cudaSuccess) return r;
}
return cudaSuccess;
}
}
  923. inline void ggml_cuda_op_add(
  924. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  925. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  926. cudaStream_t & cudaStream_main){
  927. GGML_ASSERT(src0_ddf_i != nullptr);
  928. GGML_ASSERT(src1_ddf_i != nullptr);
  929. GGML_ASSERT(dst_ddf_i != nullptr);
  930. const int64_t ne0 = src0->ne[0];
  931. const int64_t i01_diff = i01_high - i01_low;
  932. // compute
  933. add_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, ne0*i01_diff, cudaStream_main);
  934. CUDA_CHECK(cudaGetLastError());
  935. (void) src1;
  936. (void) dst;
  937. (void) src0_ddq_i;
  938. (void) i02;
  939. (void) i1;
  940. }
  941. inline void ggml_cuda_op_mul(
  942. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  943. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  944. cudaStream_t & cudaStream_main){
  945. GGML_ASSERT(src0_ddf_i != nullptr);
  946. GGML_ASSERT(src1_ddf_i != nullptr);
  947. GGML_ASSERT(dst_ddf_i != nullptr);
  948. const int64_t ne00 = src0->ne[0];
  949. const int64_t ne10 = src1->ne[0];
  950. const int64_t ne11 = src1->ne[1];
  951. for (int64_t i01 = i01_low; i01 < i01_high; i01++) {
  952. const int64_t i11 = i1*ne11 + i01%ne11; // broadcast src1 across src0
  953. float * src0_ddf_i01 = src0_ddf_i + i01*ne00;
  954. float * src1_ddf_i01 = src1_ddf_i + i11*ne10;
  955. float * dst_ddf_i01 = dst_ddf_i + i01*ne00;
  956. // compute
  957. mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
  958. CUDA_CHECK(cudaGetLastError());
  959. }
  960. (void) dst;
  961. (void) src0_ddq_i;
  962. (void) i02;
  963. }
  964. inline void ggml_cuda_op_silu(
  965. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  966. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  967. cudaStream_t & cudaStream_main){
  968. GGML_ASSERT(src0_ddf_i != nullptr);
  969. GGML_ASSERT(dst_ddf_i != nullptr);
  970. const int64_t ne00 = src0->ne[0];
  971. const int64_t i01_diff = i01_high - i01_low;
  972. // compute
  973. silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
  974. CUDA_CHECK(cudaGetLastError());
  975. (void) src1;
  976. (void) dst;
  977. (void) src0_ddq_i;
  978. (void) src1_ddf_i;
  979. (void) i02;
  980. (void) i1;
  981. }
  982. inline void ggml_cuda_op_rms_norm(
  983. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  984. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  985. cudaStream_t & cudaStream_main){
  986. GGML_ASSERT(src0_ddf_i != nullptr);
  987. GGML_ASSERT(dst_ddf_i != nullptr);
  988. const int64_t ne00 = src0->ne[0];
  989. const int64_t i01_diff = i01_high - i01_low;
  990. // compute
  991. rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
  992. CUDA_CHECK(cudaGetLastError());
  993. (void) src1;
  994. (void) dst;
  995. (void) src0_ddq_i;
  996. (void) src1_ddf_i;
  997. (void) i02;
  998. (void) i1;
  999. }
  1000. inline void ggml_cuda_op_dequantize_mul_mat_vec(
  1001. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  1002. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  1003. cudaStream_t & cudaStream_main){
  1004. GGML_ASSERT(src0_ddq_i != nullptr);
  1005. GGML_ASSERT(src1_ddf_i != nullptr);
  1006. GGML_ASSERT(dst_ddf_i != nullptr);
  1007. const int64_t ne00 = src0->ne[0];
  1008. const int64_t nrows = i01_high - i01_low;
  1009. switch (src0->type) {
  1010. case GGML_TYPE_Q4_0:
  1011. dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1012. break;
  1013. case GGML_TYPE_Q4_1:
  1014. dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1015. break;
  1016. case GGML_TYPE_Q5_0:
  1017. dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1018. break;
  1019. case GGML_TYPE_Q5_1:
  1020. dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1021. break;
  1022. case GGML_TYPE_Q8_0:
  1023. dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1024. break;
  1025. case GGML_TYPE_Q2_K:
  1026. dequantize_mul_mat_vec_q2_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1027. break;
  1028. case GGML_TYPE_Q3_K:
  1029. dequantize_mul_mat_vec_q3_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1030. break;
  1031. case GGML_TYPE_Q4_K:
  1032. dequantize_mul_mat_vec_q4_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1033. break;
  1034. case GGML_TYPE_Q5_K:
  1035. dequantize_mul_mat_vec_q5_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1036. break;
  1037. case GGML_TYPE_Q6_K:
  1038. dequantize_mul_mat_vec_q6_K_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1039. break;
  1040. case GGML_TYPE_F16:
  1041. convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1042. break;
  1043. default:
  1044. GGML_ASSERT(false);
  1045. break;
  1046. }
  1047. CUDA_CHECK(cudaGetLastError());
  1048. (void) src1;
  1049. (void) dst;
  1050. (void) src0_ddf_i;
  1051. (void) i02;
  1052. (void) i1;
  1053. }
// ggml_cuda_op_t implementation: f32 matrix multiply of one src0 slab against
// src1 via cublasSgemm. src0 is passed transposed (CUBLAS_OP_T) because ggml
// stores rows contiguously while cuBLAS expects column-major operands.
inline void ggml_cuda_op_mul_mat_cublas(
const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
cudaStream_t & cudaStream_main){
GGML_ASSERT(src0_ddf_i != nullptr);
GGML_ASSERT(src1_ddf_i != nullptr);
GGML_ASSERT(dst_ddf_i != nullptr);
// plain C = A^T * B (no scaling, no accumulation into existing dst)
const float alpha = 1.0f;
const float beta = 0.0f;
const int64_t ne00 = src0->ne[0];
const int64_t ne10 = src1->ne[0];
const int64_t ne11 = src1->ne[1];
const int64_t ne0 = dst->ne[0];
const int64_t i01_diff = i01_high - i01_low;
int id;
CUDA_CHECK(cudaGetDevice(&id));
// the main device has a larger memory buffer to hold the results from all GPUs
// ldc == nrows of the matrix that cuBLAS writes into
int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;
CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], cudaStream_main));
CUBLAS_CHECK(
cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
i01_diff, ne11, ne10,
&alpha, src0_ddf_i, ne00,
src1_ddf_i, ne10,
&beta, dst_ddf_i, ldc));
// unused parameters, kept for the shared ggml_cuda_op_t signature
(void) dst;
(void) src0_ddq_i;
(void) i02;
(void) i1;
}
  1085. inline void ggml_cuda_op_rope(
  1086. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  1087. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  1088. cudaStream_t & cudaStream_main){
  1089. GGML_ASSERT(src0_ddf_i != nullptr);
  1090. GGML_ASSERT(dst_ddf_i != nullptr);
  1091. const int64_t ne00 = src0->ne[0];
  1092. const int64_t i01_diff = i01_high - i01_low;
  1093. const int n_past = ((int32_t *) src1->data)[0];
  1094. const int n_dims = ((int32_t *) src1->data)[1];
  1095. const int mode = ((int32_t *) src1->data)[2];
  1096. GGML_ASSERT(mode == 0);
  1097. const float theta_scale = powf(10000.0, -2.0f/n_dims);
  1098. const float p = ((mode & 1) == 0 ? n_past + i02 : i02);
  1099. // compute
  1100. rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
  1101. CUDA_CHECK(cudaGetLastError());
  1102. (void) dst;
  1103. (void) src0_ddq_i;
  1104. (void) src1_ddf_i;
  1105. (void) i1;
  1106. }
  1107. static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
  1108. ggml_cuda_op_t op, bool src0_needs_f32) {
  1109. const int64_t ne00 = src0->ne[0];
  1110. const int64_t ne01 = src0->ne[1];
  1111. const int64_t ne02 = src0->ne[2];
  1112. const int64_t ne03 = src0->ne[3];
  1113. const int64_t nrows0 = ggml_nrows(src0);
  1114. const bool use_src1 = src1 != nullptr;
  1115. const int64_t ne10 = use_src1 ? src1->ne[0] : 1;
  1116. const int64_t ne11 = use_src1 ? src1->ne[1] : 1;
  1117. const int64_t ne12 = use_src1 ? src1->ne[2] : 1;
  1118. const int64_t ne13 = use_src1 ? src1->ne[3] : 1;
  1119. const int64_t ne0 = dst->ne[0];
  1120. const int64_t ne1 = dst->ne[1];
  1121. const int nb2 = dst->nb[2];
  1122. const int nb3 = dst->nb[3];
  1123. GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
  1124. GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);
  1125. // strides for iteration over dims 3 and 2
  1126. const int64_t src0_stride = ne00 * ne01;
  1127. const int64_t src1_stride = ne10 * ne11;
  1128. const int64_t dst_stride = ne0 * ne1;
  1129. const int64_t num_iters = ne02 * ne03;
  1130. const size_t src0_ts = ggml_type_size(src0->type);
  1131. const size_t src0_bs = ggml_blck_size(src0->type);
  1132. struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
  1133. struct ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
  1134. struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
  1135. const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
  1136. const bool src0_is_f32 = src0->type == GGML_TYPE_F32;
  1137. const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
  1138. const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
  1139. // dd = data device
  1140. char * src0_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // quantized
  1141. float * src0_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
  1142. float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
  1143. float * dst_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
  1144. // asq = actual size quantized, asf = actual size float
  1145. size_t src0_asq[GGML_CUDA_MAX_DEVICES] = {0};
  1146. size_t src0_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1147. size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1148. size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1149. for (int id = 0; id < g_device_count; ++id) {
  1150. if (!split && id != g_main_device) {
  1151. continue;
  1152. }
  1153. const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU && id == g_main_device;
  1154. const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
  1155. int64_t row_low, row_high;
  1156. if (split) {
  1157. row_low = id == 0 ? 0 : nrows0*g_tensor_split[id];
  1158. row_low -= row_low % GGML_CUDA_DMMV_Y;
  1159. row_high = id == g_device_count - 1 ? nrows0 : nrows0*g_tensor_split[id + 1];
  1160. row_high -= row_high % GGML_CUDA_DMMV_Y;
  1161. } else {
  1162. row_low = 0;
  1163. row_high = nrows0;
  1164. }
  1165. if (row_low == row_high) {
  1166. continue;
  1167. }
  1168. int64_t row_diff = row_high - row_low;
  1169. cudaSetDevice(id);
  1170. if (src0_on_device) {
  1171. if (src0_is_f32) {
  1172. src0_ddf[id] = (float *) src0_extra->data_device[id];
  1173. } else {
  1174. src0_ddq[id] = (char *) src0_extra->data_device[id];
  1175. }
  1176. } else {
  1177. if (src0_is_f32) {
  1178. src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
  1179. } else {
  1180. src0_ddq[id] = (char *) ggml_cuda_pool_malloc(row_diff*ne00 * src0_ts/src0_bs, &src0_asq[id]);
  1181. }
  1182. }
  1183. if (src0_needs_f32 && !src0_is_f32) {
  1184. src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
  1185. }
  1186. if (use_src1) {
  1187. if (src1_on_device) {
  1188. src1_ddf[id] = (float *) src1_extra->data_device[id];
  1189. } else {
  1190. src1_ddf[id] = (float *) ggml_cuda_pool_malloc(num_iters*src1_stride * sizeof(float), &src1_asf[id]);
  1191. }
  1192. }
  1193. if (dst_on_device) {
  1194. dst_ddf[id] = (float *) dst_extra->data_device[id];
  1195. } else {
  1196. size_t size_dst_ddf = split ? row_diff*ne1 * sizeof(float) : num_iters*dst_stride * sizeof(float);
  1197. dst_ddf[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_asf[id]);
  1198. }
  1199. for (int64_t i03 = 0; i03 < ne03; i03++) {
  1200. const int64_t i13 = i03 % ne13;
  1201. for (int64_t i02 = 0; i02 < ne02; i02++) {
  1202. const int64_t i12 = i02 % ne12;
  1203. const int64_t i0 = i03*ne02 + i02;
  1204. const int64_t i0_offset_low = row_low/ne01;
  1205. const int64_t i0_offset_high = row_high/ne01;
  1206. int64_t i01_low = 0;
  1207. int64_t i01_high = ne01;
  1208. if (split) {
  1209. if (i0 < i0_offset_low || i0 > i0_offset_high) {
  1210. continue;
  1211. }
  1212. if (i0 == i0_offset_low) {
  1213. i01_low = row_low % ne01;
  1214. }
  1215. if (i0 == i0_offset_high) {
  1216. i01_high = row_high % ne01;
  1217. }
  1218. }
  1219. // There is possibly a bug in the Windows nvcc compiler regarding instruction reordering or optimizing out local variables.
  1220. // Removing the first assert or changing the order of the arguments causes the second assert to fail.
  1221. // Removing both asserts results in i01_high becoming 0 which in turn results in garbage output.
  1222. // The root cause seems to be a problem with i0_offset_high becoming 0 when it should always be >0 (for single GPU).
  1223. GGML_ASSERT(i01_low == 0 || g_device_count > 1);
  1224. GGML_ASSERT(i01_high == ne01 || g_device_count > 1);
  1225. const int64_t i01_diff = i01_high - i01_low;
  1226. if (i01_diff == 0) {
  1227. continue;
  1228. }
  1229. const int64_t i11 = i13*ne12 + i12;
  1230. cudaStream_t cudaStream_main = g_cudaStreams_main[id][i0 % GGML_CUDA_MAX_STREAMS];
  1231. cudaStream_t cudaStream_memcpy_src1 = g_cudaStreams_memcpy_src1[id][i0 % GGML_CUDA_MAX_STREAMS];
  1232. cudaEvent_t cudaEvent_memcpy_src1 = g_cudaEvents_memcpy_src1[id][i0 % GGML_CUDA_MAX_EVENTS];
  1233. // for split tensors the data begins at i0 == i0_offset_low
  1234. char * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
  1235. float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
  1236. float * src1_ddf_i = src1_ddf[id] + i11*src1_stride;
  1237. float * dst_ddf_i = dst_ddf[id] + (i0 - i0_offset_low)*dst_stride;
  1238. // for split tensors the data pointer needs to be rounded down
  1239. // to the bin edge for i03, i02 bins beyond the first
  1240. if (i0 - i0_offset_low > 0) {
  1241. src0_ddq_i -= (row_low % ne01)*ne00 * src0_ts/src0_bs;
  1242. src0_ddf_i -= (row_low % ne01)*ne00;
  1243. }
  1244. if (i0 - i0_offset_low > 0) {
  1245. dst_ddf_i -= (row_low % ne0)*ne1;
  1246. }
  1247. // the main device memory buffer can be on VRAM scratch, with space for all partial results
  1248. // in that case an offset on dst_ddf_i is needed
  1249. if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) {
  1250. dst_ddf_i += i01_low; // offset is 0 if no tensor split
  1251. }
  1252. // copy src0, src1 to device if necessary
  1253. if (use_src1) {
  1254. if (src1->backend == GGML_BACKEND_CPU) {
  1255. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src1_ddf_i, src1, i03, i02, 0, ne11, cudaStream_memcpy_src1));
  1256. } else if (src1->backend == GGML_BACKEND_GPU) {
  1257. if (id != g_main_device) {
  1258. float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
  1259. src1_ddf_i_source += i11*src1_stride;
  1260. CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
  1261. cudaMemcpyDeviceToDevice, cudaStream_memcpy_src1));
  1262. }
  1263. } else {
  1264. GGML_ASSERT(false);
  1265. }
  1266. }
  1267. CUDA_CHECK(cudaEventRecord(cudaEvent_memcpy_src1, cudaStream_memcpy_src1));
  1268. if (!src0_on_device) {
  1269. if (src0_is_f32) {
  1270. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src0_ddf_i, src0, i03, i02, i01_low, i01_high, cudaStream_main));
  1271. } else {
  1272. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src0_ddq_i, src0, i03, i02, i01_low, i01_high, cudaStream_main));
  1273. }
  1274. }
  1275. // convert src0 to f32 if it's necessary for the ggml_cuda_op
  1276. if (src0_needs_f32 && !src0_is_f32) {
  1277. to_fp32_cuda(src0_ddq_i, src0_ddf_i, i01_diff*ne00, cudaStream_main);
  1278. CUDA_CHECK(cudaGetLastError());
  1279. }
  1280. // wait with main stream until src1 memcpy is done
  1281. CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, cudaEvent_memcpy_src1, 0));
  1282. // do the computation
  1283. op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
  1284. // copy dst to host or other device if necessary
  1285. if (!dst_on_device) {
  1286. void * dst_off_device;
  1287. cudaMemcpyKind kind;
  1288. if (dst->backend == GGML_BACKEND_CPU) {
  1289. dst_off_device = dst->data;
  1290. kind = cudaMemcpyDeviceToHost;
  1291. } else if (dst->backend == GGML_BACKEND_GPU) {
  1292. dst_off_device = dst_extra->data_device[g_main_device];
  1293. kind = cudaMemcpyDeviceToDevice;
  1294. } else {
  1295. GGML_ASSERT(false);
  1296. }
  1297. if (split) {
  1298. // src0 = weight matrix is saved as a transposed matrix for better memory layout.
  1299. // dst is NOT transposed.
  1300. // The outputs of cuBLAS matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
  1301. // Instead they need to be copied to the correct slice in ne0 = dst row index.
  1302. // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
  1303. for (int64_t j = 0; j < ne1; ++j) {
  1304. float * dhf_dst_i = (float *) ((char *) dst_off_device + (j*ne0 + i01_low)*sizeof(float) + i02*nb2 + i03*nb3);
  1305. CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i + j*i01_diff, i01_diff*sizeof(float), kind, cudaStream_main));
  1306. }
  1307. } else {
  1308. float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
  1309. CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
  1310. }
  1311. }
  1312. }
  1313. }
  1314. }
  1315. // wait until each device is finished, then free their buffers
  1316. for (int id = 0; id < g_device_count; ++id) {
  1317. CUDA_CHECK(cudaSetDevice(id));
  1318. CUDA_CHECK(cudaDeviceSynchronize());
  1319. if (src0_asq[id] > 0) {
  1320. ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
  1321. }
  1322. if (src0_asf[id] > 0) {
  1323. ggml_cuda_pool_free(src0_ddf[id], src0_asf[id]);
  1324. }
  1325. if (src1_asf[id] > 0) {
  1326. ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]);
  1327. }
  1328. if (dst_asf[id] > 0) {
  1329. ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
  1330. }
  1331. }
  1332. }
// Element-wise addition on the GPU: dst = src0 + src1. All three tensors must be F32.
void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    // last arg: op expects src0 as f32 (presumably src0_needs_f32 — see ggml_cuda_op)
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_add, true);
}
// Element-wise multiplication on the GPU: dst = src0 * src1. All three tensors must be F32.
void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    // last arg: op expects src0 as f32 (presumably src0_needs_f32 — see ggml_cuda_op)
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul, true);
}
// SiLU activation on the GPU: dst = silu(src0). src0 and dst must be F32;
// src1 is forwarded to ggml_cuda_op but not type-checked here (unary op).
void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_silu, true);
}
// RMS normalization on the GPU. src0 and dst must be F32;
// src1 is forwarded to ggml_cuda_op but not type-checked here (unary op).
void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rms_norm, true);
}
  1349. bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
  1350. GGML_ASSERT(src0->backend != GGML_BACKEND_GPU);
  1351. const int64_t ne10 = src1->ne[0];
  1352. const int64_t ne0 = dst->ne[0];
  1353. const int64_t ne1 = dst->ne[1];
  1354. // if (strcmp(dst->name, "KQ") == 0 || strcmp(dst->name, "KQV") == 0) {
  1355. // fprintf(stderr, "(%ld, %ld, %ld, %ld) + (%ld, %ld, %ld, %ld) -> (%ld, %ld, %ld, %ld)\n",
  1356. // src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
  1357. // src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
  1358. // dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3]);
  1359. // return false;
  1360. // }
  1361. // TODO: find the optimal values for these
  1362. if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
  1363. src1->type == GGML_TYPE_F32 &&
  1364. dst->type == GGML_TYPE_F32 &&
  1365. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  1366. return true;
  1367. }
  1368. return false;
  1369. }
  1370. void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  1371. if (src0->type == GGML_TYPE_F32) {
  1372. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true);
  1373. } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
  1374. if (src1->ne[1] == 1) {
  1375. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
  1376. } else {
  1377. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true);
  1378. }
  1379. } else {
  1380. GGML_ASSERT(false);
  1381. }
  1382. }
// Rotary position embedding (RoPE) on the GPU. src0 and dst must be F32;
// src1 is forwarded to ggml_cuda_op (rope parameters come from the tensor's op params).
void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true);
}
// No-op handler for ops that need no GPU work (e.g. GGML_OP_RESHAPE is a view).
// The void casts silence unused-parameter warnings.
void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    (void) src0;
    (void) src1;
    (void) dst;
}
  1392. void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
  1393. int nrows = ggml_nrows(tensor);
  1394. const size_t nb1 = tensor->nb[1];
  1395. ggml_backend backend = tensor->backend;
  1396. struct ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
  1397. for (int id = 0; id < g_device_count; ++id) {
  1398. extra->data_device[id] = nullptr;
  1399. if (backend == GGML_BACKEND_GPU && id != g_main_device) {
  1400. continue;
  1401. }
  1402. cudaSetDevice(id);
  1403. int row_low, row_high;
  1404. if (backend == GGML_BACKEND_GPU) {
  1405. row_low = 0;
  1406. row_high = nrows;
  1407. } else if (backend == GGML_BACKEND_GPU_SPLIT) {
  1408. row_low = id == 0 ? 0 : nrows*g_tensor_split[id];
  1409. row_low -= row_low % GGML_CUDA_DMMV_Y;
  1410. row_high = id == g_device_count - 1 ? nrows : nrows*g_tensor_split[id + 1];
  1411. row_high -= row_high % GGML_CUDA_DMMV_Y;
  1412. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  1413. } else {
  1414. GGML_ASSERT(false);
  1415. }
  1416. if (row_low == row_high) {
  1417. continue;
  1418. }
  1419. int64_t nrows_split = row_high - row_low;
  1420. const size_t offset_split = row_low*nb1;
  1421. const size_t size = ggml_nbytes_split(tensor, nrows_split);
  1422. void * buf;
  1423. CUDA_CHECK(cudaMalloc(&buf, size));
  1424. void * buf_host = (char*)data + offset_split;
  1425. cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);
  1426. extra->data_device[id] = buf;
  1427. }
  1428. tensor->extra = extra;
  1429. }
  1430. void ggml_cuda_free_data(struct ggml_tensor * tensor) {
  1431. if (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) {
  1432. return;
  1433. }
  1434. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  1435. for (int id = 0; id < g_device_count; ++id) {
  1436. if (extra->data_device[id] == nullptr) {
  1437. continue;
  1438. }
  1439. CUDA_CHECK(cudaSetDevice(id));
  1440. CUDA_CHECK(cudaFree(extra->data_device[id]));
  1441. }
  1442. delete extra;
  1443. }
  1444. void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
  1445. if (tensor->src0 != nullptr && tensor->src0->op == GGML_OP_RESHAPE) {
  1446. ggml_cuda_assign_buffers(tensor);
  1447. }
  1448. const size_t size = ggml_nbytes(tensor);
  1449. GGML_ASSERT(size <= g_scratch_size);
  1450. if (g_scratch_offset + size > g_scratch_size) {
  1451. g_scratch_offset = 0;
  1452. }
  1453. tensor->backend = GGML_BACKEND_GPU;
  1454. struct ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
  1455. bool inplace = tensor->src0 != nullptr && tensor->src0->data == tensor->data;
  1456. CUDA_CHECK(cudaSetDevice(g_main_device));
  1457. if (inplace && tensor->src0->backend == GGML_BACKEND_GPU) {
  1458. struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src0->extra;
  1459. extra->data_device[g_main_device] = src0_extra->data_device;
  1460. GGML_ASSERT(false);
  1461. } else {
  1462. char * data = (char *) g_scratch_buffer;
  1463. if (data == nullptr) {
  1464. CUDA_CHECK(cudaMalloc(&data, g_scratch_size));
  1465. g_scratch_buffer = data;
  1466. }
  1467. extra->data_device[g_main_device] = data + g_scratch_offset;
  1468. }
  1469. // fprintf(stderr, "data=%p offset=%ld data_device=%p\n", data, g_scratch_offset, extra->data_device[0]);
  1470. g_scratch_offset += size;
  1471. // fprintf(stderr, "%s: scratch %d, %p - %p\n",
  1472. // tensor->name, g_scratch_index, data + g_scratch_offset, data + g_scratch_offset + size);
  1473. GGML_ASSERT(g_scratch_offset <= g_scratch_size);
  1474. tensor->extra = extra;
  1475. }
  1476. void ggml_cuda_set_main_device(int main_device) {
  1477. if (main_device > g_device_count) {
  1478. fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
  1479. main_device, g_device_count, g_main_device);
  1480. return;
  1481. }
  1482. g_main_device = main_device;
  1483. if (g_device_count > 1) {
  1484. cudaDeviceProp prop;
  1485. CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device));
  1486. fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
  1487. }
  1488. }
// Set the size of the VRAM scratch buffer used by ggml_cuda_assign_buffers.
// The buffer itself is allocated lazily on first use, so this must be called
// before any tensor is assigned to it.
void ggml_cuda_set_scratch_size(size_t scratch_size) {
    g_scratch_size = scratch_size;
}
  1492. bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
  1493. ggml_cuda_func_t func;
  1494. const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
  1495. || tensor->src0->backend == GGML_BACKEND_GPU || tensor->src0->backend == GGML_BACKEND_GPU_SPLIT
  1496. || (tensor->src1 != nullptr && tensor->src1->backend == GGML_BACKEND_GPU);
  1497. switch (tensor->op) {
  1498. case GGML_OP_ADD:
  1499. if (!any_on_device) {
  1500. return false;
  1501. }
  1502. func = ggml_cuda_add;
  1503. break;
  1504. case GGML_OP_MUL:
  1505. if (!any_on_device) {
  1506. return false;
  1507. }
  1508. func = ggml_cuda_mul;
  1509. break;
  1510. case GGML_OP_SILU:
  1511. if (!any_on_device) {
  1512. return false;
  1513. }
  1514. func = ggml_cuda_silu;
  1515. break;
  1516. case GGML_OP_RMS_NORM:
  1517. if (!any_on_device) {
  1518. return false;
  1519. }
  1520. func = ggml_cuda_rms_norm;
  1521. break;
  1522. case GGML_OP_MUL_MAT:
  1523. if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src0, tensor->src1, tensor)) {
  1524. return false;
  1525. }
  1526. func = ggml_cuda_mul_mat;
  1527. break;
  1528. case GGML_OP_RESHAPE:
  1529. if (!any_on_device) {
  1530. return false;
  1531. }
  1532. func = ggml_cuda_nop;
  1533. break;
  1534. case GGML_OP_ROPE:
  1535. if (!any_on_device) {
  1536. return false;
  1537. }
  1538. func = ggml_cuda_rope;
  1539. break;
  1540. default:
  1541. return false;
  1542. }
  1543. if (params->ith != 0) {
  1544. return true;
  1545. }
  1546. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  1547. return true;
  1548. }
  1549. func(tensor->src0, tensor->src1, tensor);
  1550. return true;
  1551. }