ggml-cuda.cu 71 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>  // memcpy (used by the q5_0/q5_1 dequantize kernels)

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#include <cublas_v2.h>
#include <cuda_fp16.h>
#include <cuda_runtime.h>

#include "ggml-cuda.h"
#include "ggml.h"
// ggml stores fp16 as a plain 16-bit integer type; CUDA's half must have the
// same size so device code can reinterpret ggml buffers in place.
static_assert(sizeof(half) == sizeof(ggml_fp16_t), "wrong fp16 size");

// Abort with a file/line diagnostic when a CUDA runtime call fails.
// Wrap every cudaMalloc/cudaMemcpy/etc. call site with this macro.
#define CUDA_CHECK(err) \
    do { \
        cudaError_t err_ = (err); \
        if (err_ != cudaSuccess) { \
            fprintf(stderr, "CUDA error %d at %s:%d: %s\n", err_, __FILE__, __LINE__, \
                cudaGetErrorString(err_)); \
            exit(1); \
        } \
    } while (0)

#if CUDART_VERSION >= 12
// CUDA 12+ provides cublasGetStatusString() for readable cuBLAS errors.
#define CUBLAS_CHECK(err) \
    do { \
        cublasStatus_t err_ = (err); \
        if (err_ != CUBLAS_STATUS_SUCCESS) { \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d: %s\n", \
                err_, __FILE__, __LINE__, cublasGetStatusString(err_)); \
            exit(1); \
        } \
    } while (0)
#else
// Older toolkits: report only the numeric cuBLAS status code.
#define CUBLAS_CHECK(err) \
    do { \
        cublasStatus_t err_ = (err); \
        if (err_ != CUBLAS_STATUS_SUCCESS) { \
            fprintf(stderr, "\ncuBLAS error %d at %s:%d\n", err_, __FILE__, __LINE__); \
            exit(1); \
        } \
    } while (0)
#endif // CUDART_VERSION >= 12
// Dequantize 2 values from quantized block ib at quant index iqs into v0/v1.
typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, float & v0, float & v1);
// Convert k quantized values at x into fp32 at y, asynchronously on stream.
typedef void (*to_fp32_cuda_t)(const void * x, float * y, int k, cudaStream_t stream);
// Partial dot product of one k-quant block (at quant index iqs) with y, written to v.
typedef void (*dot_kernel_k_t)(const void * vx, const int ib, const int iqs, const float * y, float & v);
// Host-side dispatch entry for a ggml operation (dst = op(src0, src1)).
typedef void (*ggml_cuda_func_t)(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst);
// Per-op worker: operates on device buffers for a row slice [i01_low, i01_high)
// of the i02-th matrix; ddq = quantized data, ddf = float data.
typedef void (*ggml_cuda_op_t)(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i, float * src0_ddf_i,
    float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main);
// QK = number of values after dequantization
// QR = QK / number of values before dequantization

#define QK4_0 32
#define QR4_0 2
typedef struct {
    half d;                // delta (per-block scale)
    uint8_t qs[QK4_0 / 2]; // nibbles / quants (2 values per byte)
} block_q4_0;
static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding");

#define QK4_1 32
#define QR4_1 2
typedef struct {
    half d;                // delta (per-block scale)
    half m;                // min (per-block offset)
    uint8_t qs[QK4_1 / 2]; // nibbles / quants (2 values per byte)
} block_q4_1;
static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding");

#define QK5_0 32
#define QR5_0 2
typedef struct {
    half d;                // delta (per-block scale)
    uint8_t qh[4];         // 5-th bit of quants (1 bit per value, packed)
    uint8_t qs[QK5_0 / 2]; // nibbles / quants (low 4 bits, 2 values per byte)
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");

#define QK5_1 32
#define QR5_1 2
typedef struct {
    half d;                // delta (per-block scale)
    half m;                // min (per-block offset)
    uint8_t qh[4];         // 5-th bit of quants (1 bit per value, packed)
    uint8_t qs[QK5_1 / 2]; // nibbles / quants (low 4 bits, 2 values per byte)
} block_q5_1;
static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding");

#define QK8_0 32
#define QR8_0 1
typedef struct {
    half d;           // delta (per-block scale)
    int8_t qs[QK8_0]; // quants (one signed byte per value)
} block_q8_0;
static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");
//================================= k-quants
// k-quants use 256-value super-blocks with per-sub-block scales/mins that are
// themselves quantized and rescaled by fp16 super-block factors.

#define QK_K 256

typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits (scale in low nibble, min in high)
    uint8_t qs[QK_K/4];      // quants (4 x 2-bit values per byte)
    half d;                  // super-block scale for quantized scales
    half dmin;               // super-block scale for quantized mins
} block_q2_k;
static_assert(sizeof(block_q2_k) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_k block size/padding");

typedef struct {
    uint8_t hmask[QK_K/8];   // high (3rd) bit of each quant, packed
    uint8_t qs[QK_K/4];      // nibbles / quants (low 2 bits, 4 values per byte)
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits (packed across bytes)
    half d;                  // super-block scale
} block_q3_k;
static_assert(sizeof(block_q3_k) == sizeof(ggml_fp16_t) + QK_K / 4 + 11 * QK_K / 64, "wrong q3_k block size/padding");

typedef struct {
    half d;                  // super-block scale for quantized scales
    half dmin;               // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    uint8_t qs[QK_K/2];      // 4--bit quants
} block_q4_k;
static_assert(sizeof(block_q4_k) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_k block size/padding");

typedef struct {
    half d;                  // super-block scale for quantized scales
    half dmin;               // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits
    uint8_t qh[QK_K/8];      // quants, high bit
    uint8_t qs[QK_K/2];      // quants, low 4 bits
} block_q5_k;
static_assert(sizeof(block_q5_k) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2 + QK_K/8, "wrong q5_k block size/padding");

typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t scales[QK_K/16];  // scales (signed, 8 bits)
    half d;                  // delta (super-block scale)
} block_q6_k;
static_assert(sizeof(block_q6_k) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_k block size/padding");
// Threads per warp on all supported NVIDIA GPUs.
#define WARP_SIZE 32

// Default 1D block sizes for the simple element-wise kernels below.
#define CUDA_ADD_BLOCK_SIZE 256
#define CUDA_MUL_BLOCK_SIZE 256
#define CUDA_SILU_BLOCK_SIZE 256
#define CUDA_ROPE_BLOCK_SIZE 256
#define CUDA_DEQUANTIZE_BLOCK_SIZE 256

// dmmv = dequantize_mul_mat_vec
// Tunable at build time: X = quantized values processed per warp iteration,
// Y = rows processed per block (blockDim.y).
#ifndef GGML_CUDA_DMMV_X
#define GGML_CUDA_DMMV_X 32
#endif
#ifndef GGML_CUDA_DMMV_Y
#define GGML_CUDA_DMMV_Y 1
#endif
  142. static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
  143. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  144. if (i >= k) {
  145. return;
  146. }
  147. dst[i] = x[i] + y[i];
  148. }
  149. static __global__ void mul_f32(const float * x, const float * y, float * dst, const int kx, const int ky) {
  150. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  151. if (i >= kx) {
  152. return;
  153. }
  154. dst[i] = x[i] * y[i%ky];
  155. }
  156. static __global__ void silu_f32(const float * x, float * dst, const int k) {
  157. const int i = blockDim.x*blockIdx.x + threadIdx.x;
  158. if (i >= k) {
  159. return;
  160. }
  161. dst[i] = x[i] / (1.0f + expf(-x[i]));
  162. }
  163. static __global__ void rms_norm_f32(const float * x, float * dst, const int ncols) {
  164. const int row = blockIdx.x*blockDim.y + threadIdx.y;
  165. const int tid = threadIdx.x;
  166. const float eps = 1e-6;
  167. float tmp = 0.0f; // partial sum for thread in warp
  168. for (int i = 0; i < ncols; i += WARP_SIZE) {
  169. const int col = i + tid;
  170. const float xi = x[row*ncols + col];
  171. tmp += xi * xi;
  172. }
  173. // sum up partial sums
  174. __syncthreads();
  175. #pragma unroll
  176. for (int mask = 16; mask > 0; mask >>= 1) {
  177. tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
  178. }
  179. const float mean = tmp / ncols;
  180. const float scale = 1.0f / sqrtf(mean + eps);
  181. for (int i = 0; i < ncols; i += WARP_SIZE) {
  182. const int col = i + tid;
  183. dst[row*ncols + col] = scale * x[row*ncols + col];
  184. }
  185. }
  186. static __device__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  187. const block_q4_0 * x = (const block_q4_0 *) vx;
  188. const float d = x[ib].d;
  189. const uint8_t vui = x[ib].qs[iqs];
  190. const int8_t vi0 = vui & 0xF;
  191. const int8_t vi1 = vui >> 4;
  192. v0 = (vi0 - 8)*d;
  193. v1 = (vi1 - 8)*d;
  194. }
  195. static __device__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  196. const block_q4_1 * x = (const block_q4_1 *) vx;
  197. const float d = x[ib].d;
  198. const float m = x[ib].m;
  199. const uint8_t vui = x[ib].qs[iqs];
  200. const int8_t vi0 = vui & 0xF;
  201. const int8_t vi1 = vui >> 4;
  202. v0 = vi0*d + m;
  203. v1 = vi1*d + m;
  204. }
  205. static __device__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  206. const block_q5_0 * x = (const block_q5_0 *) vx;
  207. const float d = x[ib].d;
  208. uint32_t qh;
  209. memcpy(&qh, x[ib].qh, sizeof(qh));
  210. const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
  211. const uint8_t xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
  212. const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0) - 16;
  213. const int32_t x1 = ((x[ib].qs[iqs] >> 4) | xh_1) - 16;
  214. v0 = x0*d;
  215. v1 = x1*d;
  216. }
  217. static __device__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  218. const block_q5_1 * x = (const block_q5_1 *) vx;
  219. const float d = x[ib].d;
  220. const float m = x[ib].m;
  221. uint32_t qh;
  222. memcpy(&qh, x[ib].qh, sizeof(qh));
  223. const uint8_t xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
  224. const uint8_t xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
  225. const int32_t x0 = ((x[ib].qs[iqs] & 0xf) | xh_0);
  226. const int32_t x1 = ((x[ib].qs[iqs] >> 4) | xh_1);
  227. v0 = x0*d + m;
  228. v1 = x1*d + m;
  229. }
  230. static __device__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  231. const block_q8_0 * x = (const block_q8_0 *) vx;
  232. const float d = x[ib].d;
  233. const int8_t vi0 = x[ib].qs[iqs + 0];
  234. const int8_t vi1 = x[ib].qs[iqs + 1];
  235. v0 = vi0*d;
  236. v1 = vi1*d;
  237. }
//================================== k-quants

// Dequantize one q2_k super-block (QK_K = 256 values) per CUDA block.
// tid/32 in {0,1} and l in [0,32) imply 64 threads per block, each writing
// 4 outputs spaced 32 apart -- assumes a 64-thread launch; confirm at call site.
static __global__ void dequantize_block_q2_k(const void * vx, float * yy) {
    const int i = blockIdx.x;     // super-block index
    const int tid = threadIdx.x;
    const int n = tid/32;         // which 128-value half of the super-block
    const int l = tid - 32*n;     // position within that half
    const int is = 8*n + l/16;    // index into the 16 packed scale/min bytes
    const block_q2_k * x = (const block_q2_k *) vx;
    const uint8_t q = x[i].qs[32*n + l]; // one byte = four 2-bit quants
    float * y = yy + i*QK_K + 128*n;
    float dall = x[i].d;    // super-block scale for the 4-bit sub-scales
    float dmin = x[i].dmin; // super-block scale for the 4-bit sub-mins
    // scales[] byte layout: sub-scale in the low nibble, sub-min in the high nibble;
    // the 4 quants in q map to outputs at offsets 0/32/64/96
    y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
    y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
    y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
    y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
}
// Partial dot product of 8 q2_k values (quant index iqs, in steps of 8) with
// the matching slice of yy; the 8 terms cover offsets 0/16/32/.../112 within
// one 128-value half of the super-block.
static __device__ void vec_dot_q2_k(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q2_k * x = (const block_q2_k *) vx;

    // if n is 0, we want to do the lower 128, else the upper 128,
    // covering y[l+0], y[l+32], y[l+64], y[l+96] and
    // y[l+16], y[l+48], y[l+80], y[l+112]
    int n = iqs/128;     // 0 or 1
    int r = iqs - 128*n; // 0...120 in steps of 8
    int l = r/8;         // 0...15 in steps of 1

    const float   * y = yy + 128*n + l;
    const uint8_t * q = x[ib].qs + 32*n + l;
    const uint8_t * s = x[ib].scales + 8*n;

    const float dall = x[ib].d;    // super-block scale for the sub-scales
    const float dmin = x[ib].dmin; // super-block scale for the sub-mins

    // each scales[] byte packs the sub-scale (low nibble) and sub-min (high nibble);
    // q[0]/q[16] each hold four 2-bit quants for the 0/32/64/96 and 16/48/80/112 lanes
    float sum = y[  0] * (dall * ((s[0] & 0xF) * ((q[ 0] >> 0) & 3)) - dmin * (s[0] >> 4))
              + y[ 32] * (dall * ((s[2] & 0xF) * ((q[ 0] >> 2) & 3)) - dmin * (s[2] >> 4))
              + y[ 64] * (dall * ((s[4] & 0xF) * ((q[ 0] >> 4) & 3)) - dmin * (s[4] >> 4))
              + y[ 96] * (dall * ((s[6] & 0xF) * ((q[ 0] >> 6) & 3)) - dmin * (s[6] >> 4))
              + y[ 16] * (dall * ((s[1] & 0xF) * ((q[16] >> 0) & 3)) - dmin * (s[1] >> 4))
              + y[ 48] * (dall * ((s[3] & 0xF) * ((q[16] >> 2) & 3)) - dmin * (s[3] >> 4))
              + y[ 80] * (dall * ((s[5] & 0xF) * ((q[16] >> 4) & 3)) - dmin * (s[5] >> 4))
              + y[112] * (dall * ((s[7] & 0xF) * ((q[16] >> 6) & 3)) - dmin * (s[7] >> 4));

    result = sum;
}
// Dequantize one q3_k super-block per CUDA block.
// The index math (tid = r/2 in [0,8), n in {0,1}) implies 64 threads per block,
// each writing 4 consecutive outputs -- assumes a 64-thread launch; confirm at call site.
static __global__ void dequantize_block_q3_k(const void * vx, float * yy) {
    int r = threadIdx.x/4;
    int i = blockIdx.x;            // super-block index
    int tid = r/2;
    int is0 = r%2;
    int l0 = 16*is0 + 4*(threadIdx.x%4); // start of this thread's 4-value run
    int n = tid / 4;               // which 128-value half of the super-block
    int j = tid - 4*n;             // 32-value group within the half
    const block_q3_k * x = (const block_q3_k *) vx;
    uint8_t m = 1 << (4*n + j);    // hmask bit selecting the quants' 3rd bit
    int is = 8*n + 2*j + is0;      // 6-bit scale index
    int shift = 2*j;               // bit position of the 2-bit quant in qs
    // unpack the is-th 6-bit scale from the 12 packed scale bytes:
    // low 4 bits come from one nibble, high 2 bits from the last 4 bytes
    int8_t us = is <  4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
                is <  8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
                is < 12 ? (x[i].scales[is-8] >>  4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
                          (x[i].scales[is-8] >>  4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
    float d_all = x[i].d;
    float dl = d_all * (us - 32);  // scales carry an implicit offset of 32
    float * y = yy + i*QK_K + 128*n + 32*j;
    const uint8_t * q = x[i].qs + 32*n;
    const uint8_t * hm = x[i].hmask;
    // a cleared hmask bit means the stored quant lacks its +4 contribution
    for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
}
// Partial dot product of 8 q3_k values (quant index iqs, in steps of 8) with
// the matching slice of yy. Unpacks all 16 6-bit scales into utmp first, then
// accumulates the 8 terms at offsets 0/16/32/.../112 of one 128-value half.
static __device__ void vec_dot_q3_k(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q3_k * x = (const block_q3_k *) vx;

    const uint32_t kmask1 = 0x03030303; // selects the high 2 bits of each scale
    const uint32_t kmask2 = 0x0f0f0f0f; // selects the low 4 bits of each scale

    uint32_t aux[3];  // raw copy of the 12 packed scale bytes
    uint32_t utmp[4]; // 16 unpacked 6-bit scales, one byte each

    // if n is 0, we want to do the lower 128, else the upper 128,
    // covering y[l+0], y[l+32], y[l+64], y[l+96] and
    // y[l+16], y[l+48], y[l+80], y[l+112]
    int n = iqs/128;     // 0 or 1
    int r = iqs - 128*n; // 0...120 in steps of 8
    int l = r/8;         // 0...15 in steps of 1

    const float   * y = yy + 128*n + l;
    const uint8_t * q = x[ib].qs + 32*n + l;
    const uint8_t * hm = x[ib].hmask + l;
    const int8_t  * s = (const int8_t *)utmp + 8*n;

    memcpy(aux, x[ib].scales, 12);
    // recombine low nibbles with high 2-bit pairs into byte-wide scales
    utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
    utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
    utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
    utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);

    const float dall = x[ib].d;
    const uint8_t m = 1 << (4*n); // base hmask bit for this half

    // scales carry an implicit -32 offset; a cleared hmask bit means the quant
    // lacks its +4 contribution
    float sum = y[  0] * (s[0] - 32) * (((q[ 0] >> 0) & 3) - (hm[ 0] & (m << 0) ? 0 : 4))
              + y[ 32] * (s[2] - 32) * (((q[ 0] >> 2) & 3) - (hm[ 0] & (m << 1) ? 0 : 4))
              + y[ 64] * (s[4] - 32) * (((q[ 0] >> 4) & 3) - (hm[ 0] & (m << 2) ? 0 : 4))
              + y[ 96] * (s[6] - 32) * (((q[ 0] >> 6) & 3) - (hm[ 0] & (m << 3) ? 0 : 4))
              + y[ 16] * (s[1] - 32) * (((q[16] >> 0) & 3) - (hm[16] & (m << 0) ? 0 : 4))
              + y[ 48] * (s[3] - 32) * (((q[16] >> 2) & 3) - (hm[16] & (m << 1) ? 0 : 4))
              + y[ 80] * (s[5] - 32) * (((q[16] >> 4) & 3) - (hm[16] & (m << 2) ? 0 : 4))
              + y[112] * (s[7] - 32) * (((q[16] >> 6) & 3) - (hm[16] & (m << 3) ? 0 : 4));

    result = sum * dall;
}
  334. static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
  335. if (j < 4) {
  336. d = q[j] & 63; m = q[j + 4] & 63;
  337. } else {
  338. d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
  339. m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
  340. }
  341. }
// Dequantize one q4_k super-block per CUDA block.
// Active configuration assumes 32 threads per block (il in [0,4), ir in [0,8),
// 4 values per thread) -- confirm at launch site.
static __global__ void dequantize_block_q4_k(const void * vx, float * yy) {
    const block_q4_k * x = (const block_q4_k *) vx;

    const int i = blockIdx.x; // super-block index

    //// assume 64 threads - this is very slightly better than the one below
    //const int tid = threadIdx.x;
    //const int il  = tid/16;
    //const int ir  = tid%16;
    //const int is  = 2*il;
    //const int n   = 2;

    // assume 32 threads
    const int tid = threadIdx.x;
    const int il = tid/8;  // 64-value group within the super-block
    const int ir = tid%8;  // position within the group
    const int is = 2*il;   // scale/min pair index
    const int n = 4;       // values per thread per nibble half

    float * y = yy + i*QK_K + 64*il + n*ir;

    const float dall = x[i].d;    // super-block scale for the sub-scales
    const float dmin = x[i].dmin; // super-block scale for the sub-mins

    const uint8_t * q = x[i].qs + 32*il + n*ir;

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m; // low-nibble scale/min
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m; // high-nibble scale/min
    for (int l = 0; l < n; ++l) {
        y[l + 0] = d1 * (q[l] & 0xF) - m1; // low nibbles -> first 32 of the group
        y[l +32] = d2 * (q[l] >>  4) - m2; // high nibbles -> second 32
    }
}
// Partial dot product of 8 q4_k values (4 low-nibble + 4 high-nibble quants)
// with the matching slice of yy.
static __device__ void vec_dot_q4_k(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q4_k * x = (const block_q4_k *) vx;

    // iqs is in 0...248 in steps of 8 =>
    const int j  = iqs / 64;        // j is in 0...3
    const int ir = (iqs - 64*j)/2;  // ir is in 0...28 in steps of 4
    const int is = 2*j;             // is is in 0...6 in steps of 2

    const float   * y = yy + 64*j + ir;
    const uint8_t * q = x[ib].qs + 32*j + ir;

    const float dall = x[ib].d;    // super-block scale for the sub-scales
    const float dmin = x[ib].dmin; // super-block scale for the sub-mins

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[ib].scales, sc, m);
    const float d1 = dall * sc; // low-nibble scale
    const float m1 = dmin * m;  // low-nibble min
    get_scale_min_k4(is + 1, x[ib].scales, sc, m);
    const float d2 = dall * sc; // high-nibble scale
    const float m2 = dmin * m;  // high-nibble min

    float sum = 0;
    for (int k = 0; k < 4; ++k) {
        sum += y[k +  0] * (d1 * (q[k] & 0xF) - m1);
        sum += y[k + 32] * (d2 * (q[k] >>  4) - m2);
    }
    result = sum;
}
// Dequantize one q5_k super-block per CUDA block.
// assume 64 threads - this is very slightly better than the one below
// (il in [0,4), ir in [0,16), 4 outputs per thread) -- confirm at launch site.
static __global__ void dequantize_block_q5_k(const void * vx, float * yy) {
    const block_q5_k * x = (const block_q5_k *) vx;

    const int i = blockIdx.x; // super-block index

    const int tid = threadIdx.x;
    const int il = tid/16; // il is in 0...3
    const int ir = tid%16; // ir is in 0...15
    const int is = 2*il;   // is is in 0...6

    float * y = yy + i*QK_K + 64*il + 2*ir;

    const float dall = x[i].d;    // super-block scale for the sub-scales
    const float dmin = x[i].dmin; // super-block scale for the sub-mins

    const uint8_t * ql = x[i].qs + 32*il + 2*ir; // low 4 bits of the quants
    const uint8_t * qh = x[i].qh + 2*ir;         // packed 5th bits

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[i].scales, sc, m);
    const float d1 = dall * sc; const float m1 = dmin * m; // low-nibble scale/min
    get_scale_min_k4(is + 1, x[i].scales, sc, m);
    const float d2 = dall * sc; const float m2 = dmin * m; // high-nibble scale/min

    uint8_t hm = 1 << (2*il); // qh bit for the low-nibble values
    y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
    y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
    hm <<= 1;                 // next qh bit belongs to the high-nibble values
    y[32] = d2 * ((ql[ 0] >>  4) + (qh[ 0] & hm ? 16 : 0)) - m2;
    y[33] = d2 * ((ql[ 1] >>  4) + (qh[ 1] & hm ? 16 : 0)) - m2;
}
// Partial dot product of 8 q5_k values (4 low-nibble + 4 high-nibble quants,
// each with its 5th bit taken from qh) with the matching slice of yy.
static __device__ void vec_dot_q5_k(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q5_k * x = (const block_q5_k *) vx;

    // iqs is in 0...248 in steps of 8 =>
    const int j  = iqs / 64;        // j is in 0...3
    const int ir = (iqs - 64*j)/2;  // ir is in 0...28 in steps of 4
    const int is = 2*j;             // is is in 0...6 in steps of 2

    const float   * y  = yy + 64*j + ir;
    const uint8_t * ql = x[ib].qs + 32*j + ir; // low 4 bits of the quants
    const uint8_t * qh = x[ib].qh + ir;        // packed 5th bits

    const float dall = x[ib].d;    // super-block scale for the sub-scales
    const float dmin = x[ib].dmin; // super-block scale for the sub-mins

    uint8_t sc, m;
    get_scale_min_k4(is + 0, x[ib].scales, sc, m);
    const float d1 = dall * sc; // low-nibble scale
    const float m1 = dmin * m;  // low-nibble min
    get_scale_min_k4(is + 1, x[ib].scales, sc, m);
    const float d2 = dall * sc; // high-nibble scale
    const float m2 = dmin * m;  // high-nibble min

    uint8_t hm = 1 << is; // qh bit for the low-nibble values
    float sum = 0;
    for (int k = 0; k < 4; ++k) {
        sum += y[k +  0] * (d1 * ((ql[k] & 0xF) + (qh[k] & hm ? 16 : 0)) - m1);
    }
    hm <<= 1;             // next qh bit belongs to the high-nibble values
    for (int k = 0; k < 4; ++k) {
        sum += y[k + 32] * (d2 * ((ql[k] >>  4) + (qh[k] & hm ? 16 : 0)) - m2);
    }
    result = sum;
}
// Dequantize one q6_k super-block per CUDA block.
// assume 64 threads - this is very slightly better than the one below
// (ip in {0,1}, il in [0,32), 4 outputs per thread) -- confirm at launch site.
static __global__ void dequantize_block_q6_k(const void * vx, float * yy) {
    const block_q6_k * x = (const block_q6_k *) vx;

    const int i = blockIdx.x; // super-block index

    const int tid = threadIdx.x;
    const int ip = tid/32;      // ip is 0 or 1 (which 128-value half)
    const int il = tid - 32*ip; // 0...32
    const int is = 8*ip + il/16; // scale index

    float * y = yy + i*QK_K + 128*ip + il;

    const float d = x[i].d; // super-block scale

    const uint8_t * ql = x[i].ql + 64*ip + il; // low 4 bits of the quants
    const uint8_t   qh = x[i].qh[32*ip + il];  // upper 2 bits, 4 values per byte
    const int8_t  * sc = x[i].scales + is;

    // combine low nibble + 2 high bits into a 6-bit quant, offset by 32
    y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
    y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
    y[64] = d * sc[4] * ((int8_t)((ql[ 0] >>  4) | (((qh >> 4) & 3) << 4)) - 32);
    y[96] = d * sc[6] * ((int8_t)((ql[32] >>  4) | (((qh >> 6) & 3) << 4)) - 32);
}
// Partial dot product of 8 q6_k values (quant index iqs, in steps of 8) with
// the matching slice of yy; terms cover offsets 0/16/32/.../112 of one
// 128-value half of the super-block.
static __device__ void vec_dot_q6_k(const void * vx, const int ib, const int iqs, const float * yy, float & result) {

    const block_q6_k * x = (const block_q6_k *) vx;

    const int ip = iqs / 128;        // 0 or 1 (which 128-value half)
    const int il = (iqs - 128*ip)/8; // 0...15
    const int is = 8*ip;             // base scale index for this half

    const float * y = yy + 128*ip + il;

    const float d = x[ib].d; // super-block scale

    const uint8_t * ql = x[ib].ql + 64*ip + il; // low 4 bits of the quants
    const uint8_t * qh = x[ib].qh + 32*ip + il; // upper 2 bits, 4 values per byte
    const int8_t  * sc = x[ib].scales + is;

    // combine low nibble + 2 high bits into a 6-bit quant, offset by 32
    result = y[  0] * d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh[ 0] >> 0) & 3) << 4)) - 32)
           + y[ 32] * d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh[ 0] >> 2) & 3) << 4)) - 32)
           + y[ 64] * d * sc[4] * ((int8_t)((ql[ 0] >>  4) | (((qh[ 0] >> 4) & 3) << 4)) - 32)
           + y[ 96] * d * sc[6] * ((int8_t)((ql[32] >>  4) | (((qh[ 0] >> 6) & 3) << 4)) - 32)
           + y[ 16] * d * sc[1] * ((int8_t)((ql[16] & 0xF) | (((qh[16] >> 0) & 3) << 4)) - 32)
           + y[ 48] * d * sc[3] * ((int8_t)((ql[48] & 0xF) | (((qh[16] >> 2) & 3) << 4)) - 32)
           + y[ 80] * d * sc[5] * ((int8_t)((ql[16] >>  4) | (((qh[16] >> 4) & 3) << 4)) - 32)
           + y[112] * d * sc[7] * ((int8_t)((ql[48] >>  4) | (((qh[16] >> 6) & 3) << 4)) - 32);
}
  486. static __device__ void convert_f16(const void * vx, const int ib, const int iqs, float & v0, float & v1){
  487. const half * x = (const half *) vx;
  488. v0 = __half2float(x[ib + iqs + 0]);
  489. v1 = __half2float(x[ib + iqs + 1]);
  490. }
// Generic dequantization kernel: each thread converts 2 quantized values to fp32.
// qk = values per quantized block, qr = quants per data value.
// NOTE(review): with i = blockDim.x*blockIdx.x + 2*threadIdx.x, consecutive
// blocks' index ranges overlap, so some positions are written more than once
// (identical values, so the result is still correct) -- confirm the launcher's
// grid size covers all k values.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_block(const void * vx, float * y, const int k) {
    const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;

    if (i >= k) {
        return;
    }

    const int ib = i/qk;       // quantized block index
    const int iqs = (i%qk)/qr; // quant index within the block
    const int iybs = i - i%qk; // y block start index
    const int y_offset = qr == 1 ? 1 : qk/2; // second value lands qk/2 later for qr > 1

    // dequantize
    float & v0 = y[iybs + iqs + 0];
    float & v1 = y[iybs + iqs + y_offset];
    dequantize_kernel(vx, ib, iqs, v0, v1);
}
// Fused dequantize + matrix-vector product: one warp per output row.
// Requires blockDim.x == WARP_SIZE; there is no column bounds check, so ncols
// must be a multiple of iter_stride -- confirm at launch site.
template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
static __global__ void dequantize_mul_mat_vec(const void * vx, const float * y, float * dst, const int ncols) {
    // qk = quantized weights per x block
    // qr = number of quantized weights per data value in x block
    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    const int tid = threadIdx.x; // lane within the warp

    const int iter_stride = 2*GGML_CUDA_DMMV_X;
    const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
    const int y_offset = qr == 1 ? 1 : qk/2;

    float tmp = 0.0f; // partial sum for thread in warp

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = (row*ncols + col)/qk; // x block index
        const int iqs = (col%qk)/qr;         // x quant index
        const int iybs = col - col%qk;       // y block start index

        // processing >2 values per i iter is faster for fast GPUs
#pragma unroll
        for (int j = 0; j < vals_per_iter; j += 2) {
            // process 2 vals per j iter

            // dequantize
            float v0, v1;
            dequantize_kernel(vx, ib, iqs + j/qr, v0, v1);
            // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val

            // matrix multiplication
            tmp += v0 * y[iybs + iqs + j/qr + 0];
            tmp += v1 * y[iybs + iqs + j/qr + y_offset];
            // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
        }
    }

    // sum up partial sums and write back result
    __syncthreads();
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (tid == 0) { // lane 0 holds the fully reduced dot product
        dst[row] = tmp;
    }
}
// k-quant variant of the fused dequantize + matrix-vector product: one warp
// per output row, each thread handling vals_per_iter quants per super-block.
// The final reduction shuffles over a full 32-lane warp; assumes ncols is a
// multiple of QK_K -- confirm at launch site.
template <int n_thread, dot_kernel_k_t dot_kernel>
static __global__ void dequantize_mul_mat_vec_k(const void * vx, const float * y, float * dst, const int ncols) {
    const int row = blockIdx.x*blockDim.y + threadIdx.y;
    const int tid = threadIdx.x; // lane within the warp

    const int iter_stride = QK_K;
    const int vals_per_iter = iter_stride / n_thread;
    const int num_blocks_per_row = ncols / QK_K;
    const int ib0 = row*num_blocks_per_row; // first super-block of this row

    float tmp = 0; // partial sum for thread in warp

    for (int i = 0; i < ncols; i += iter_stride) {
        const int col = i + vals_per_iter*tid;
        const int ib = ib0 + col/QK_K; // x block index
        const int iqs = col%QK_K;      // x quant index
        const int iybs = col - col%QK_K; // y block start index

        float v;
        dot_kernel(vx, ib, iqs, y + iybs, v);
        tmp += v;
    }

    // sum up partial sums and write back result
    __syncthreads();
#pragma unroll
    for (int mask = 16; mask > 0; mask >>= 1) {
        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
    }

    if (tid == 0) { // lane 0 holds the fully reduced dot product
        dst[row] = tmp;
    }
}
  573. static __global__ void rope_f32(const float * x, float * dst, const int ncols, const float p, const float theta_scale) {
  574. const int col = 2*(blockDim.x*blockIdx.x + threadIdx.x);
  575. if (col >= ncols) {
  576. return;
  577. }
  578. const int row = blockDim.y*blockIdx.y + threadIdx.y;
  579. const int i = row*ncols + col;
  580. const float theta = p*powf(theta_scale, col/2);
  581. const float sin_theta = sinf(theta);
  582. const float cos_theta = cosf(theta);
  583. const float x0 = x[i + 0];
  584. const float x1 = x[i + 1];
  585. dst[i + 0] = x0*cos_theta - x1*sin_theta;
  586. dst[i + 1] = x0*sin_theta + x1*cos_theta;
  587. }
  588. static void add_f32_cuda(const float * x, const float * y, float * dst, const int k, cudaStream_t stream) {
  589. const int num_blocks = (k + CUDA_ADD_BLOCK_SIZE - 1) / CUDA_ADD_BLOCK_SIZE;
  590. add_f32<<<num_blocks, CUDA_ADD_BLOCK_SIZE, 0, stream>>>(x, y, dst, k);
  591. }
  592. static void mul_f32_cuda(const float * x, const float * y, float * dst, const int kx, const int ky, cudaStream_t stream) {
  593. const int num_blocks = (kx + CUDA_MUL_BLOCK_SIZE - 1) / CUDA_MUL_BLOCK_SIZE;
  594. mul_f32<<<num_blocks, CUDA_MUL_BLOCK_SIZE, 0, stream>>>(x, y, dst, kx, ky);
  595. }
  596. static void silu_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
  597. const int num_blocks = (k + CUDA_SILU_BLOCK_SIZE - 1) / CUDA_SILU_BLOCK_SIZE;
  598. silu_f32<<<num_blocks, CUDA_SILU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
  599. }
  600. static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  601. GGML_ASSERT(ncols % WARP_SIZE == 0);
  602. const dim3 block_dims(WARP_SIZE, 1, 1);
  603. rms_norm_f32<<<nrows, block_dims, 0, stream>>>(x, dst, ncols);
  604. }
  605. static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  606. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  607. dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  608. }
  609. static void dequantize_row_q4_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  610. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  611. dequantize_block<QK4_1, QR4_1, dequantize_q4_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  612. }
  613. static void dequantize_row_q5_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  614. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  615. dequantize_block<QK5_0, QR5_0, dequantize_q5_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  616. }
  617. static void dequantize_row_q5_1_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  618. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  619. dequantize_block<QK5_1, QR5_1, dequantize_q5_1><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  620. }
  621. static void dequantize_row_q8_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  622. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  623. dequantize_block<QK8_0, QR8_0, dequantize_q8_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  624. }
  625. static void dequantize_row_q2_k_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  626. const int nb = k / QK_K;
  627. dequantize_block_q2_k<<<nb, 64, 0, stream>>>(vx, y);
  628. }
  629. static void dequantize_row_q3_k_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  630. const int nb = k / QK_K;
  631. dequantize_block_q3_k<<<nb, 64, 0, stream>>>(vx, y);
  632. }
  633. static void dequantize_row_q4_k_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  634. const int nb = k / QK_K;
  635. dequantize_block_q4_k<<<nb, 32, 0, stream>>>(vx, y);
  636. }
  637. static void dequantize_row_q5_k_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  638. const int nb = k / QK_K;
  639. dequantize_block_q5_k<<<nb, 64, 0, stream>>>(vx, y);
  640. }
  641. static void dequantize_row_q6_k_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  642. const int nb = k / QK_K;
  643. dequantize_block_q6_k<<<nb, 64, 0, stream>>>(vx, y);
  644. }
  645. static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  646. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  647. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  648. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  649. dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
  650. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  651. }
  652. static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  653. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  654. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  655. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  656. dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
  657. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  658. }
  659. static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  660. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  661. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  662. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  663. dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
  664. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  665. }
  666. static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  667. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  668. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  669. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  670. dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
  671. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  672. }
  673. static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  674. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  675. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  676. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  677. dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
  678. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  679. }
  680. static void dequantize_mul_mat_vec_q2_k_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  681. GGML_ASSERT(ncols % QK_K == 0);
  682. const int ny = 2;
  683. const dim3 block_dims(32, ny, 1);
  684. dequantize_mul_mat_vec_k<32, vec_dot_q2_k><<<(nrows + ny - 1)/ny, block_dims, 0, stream>>>(vx, y, dst, ncols);
  685. }
  686. static void dequantize_mul_mat_vec_q3_k_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  687. GGML_ASSERT(ncols % QK_K == 0);
  688. const dim3 block_dims(32, 2, 1);
  689. dequantize_mul_mat_vec_k<32, vec_dot_q3_k><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  690. }
  691. static void dequantize_mul_mat_vec_q4_k_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  692. GGML_ASSERT(ncols % QK_K == 0);
  693. const dim3 block_dims(32, 2, 1);
  694. dequantize_mul_mat_vec_k<32, vec_dot_q4_k><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  695. }
  696. static void dequantize_mul_mat_vec_q5_k_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  697. GGML_ASSERT(ncols % QK_K == 0);
  698. const dim3 block_dims(32, 2, 1);
  699. dequantize_mul_mat_vec_k<32, vec_dot_q5_k><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  700. }
  701. static void dequantize_mul_mat_vec_q6_k_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  702. GGML_ASSERT(ncols % QK_K == 0);
  703. const dim3 block_dims(32, 2, 1);
  704. dequantize_mul_mat_vec_k<32, vec_dot_q6_k><<<nrows/2, block_dims, 0, stream>>>(vx, y, dst, ncols);
  705. }
  706. static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
  707. const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
  708. dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
  709. }
  710. static void convert_mul_mat_vec_f16_cuda(const void * vx, const float * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
  711. GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
  712. GGML_ASSERT(nrows % GGML_CUDA_DMMV_Y == 0);
  713. const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
  714. dequantize_mul_mat_vec<1, 1, convert_f16>
  715. <<<nrows/GGML_CUDA_DMMV_Y, block_dims, 0, stream>>>(vx, y, dst, ncols);
  716. }
  717. static to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
  718. switch (type) {
  719. case GGML_TYPE_Q4_0:
  720. return dequantize_row_q4_0_cuda;
  721. case GGML_TYPE_Q4_1:
  722. return dequantize_row_q4_1_cuda;
  723. case GGML_TYPE_Q5_0:
  724. return dequantize_row_q5_0_cuda;
  725. case GGML_TYPE_Q5_1:
  726. return dequantize_row_q5_1_cuda;
  727. case GGML_TYPE_Q8_0:
  728. return dequantize_row_q8_0_cuda;
  729. case GGML_TYPE_Q2_K:
  730. return dequantize_row_q2_k_cuda;
  731. case GGML_TYPE_Q3_K:
  732. return dequantize_row_q3_k_cuda;
  733. case GGML_TYPE_Q4_K:
  734. return dequantize_row_q4_k_cuda;
  735. case GGML_TYPE_Q5_K:
  736. return dequantize_row_q5_k_cuda;
  737. case GGML_TYPE_Q6_K:
  738. return dequantize_row_q6_k_cuda;
  739. case GGML_TYPE_F16:
  740. return convert_fp16_to_fp32_cuda;
  741. default:
  742. return nullptr;
  743. }
  744. }
  745. static void rope_f32_cuda(const float * x, float * dst, const int ncols, const int nrows, const float p, const float theta_scale, cudaStream_t stream) {
  746. GGML_ASSERT(nrows % 2 == 0);
  747. const dim3 block_dims(2*CUDA_ROPE_BLOCK_SIZE, 1, 1);
  748. const int num_blocks_x = (ncols + 2*CUDA_ROPE_BLOCK_SIZE - 1) / (2*CUDA_ROPE_BLOCK_SIZE);
  749. const dim3 block_nums(num_blocks_x, nrows, 1);
  750. rope_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, p, theta_scale);
  751. }
  752. // buffer pool for cuda
  753. #define MAX_CUDA_BUFFERS 256
  754. struct scoped_spin_lock {
  755. std::atomic_flag& lock;
  756. scoped_spin_lock(std::atomic_flag& lock) : lock(lock) {
  757. while (lock.test_and_set(std::memory_order_acquire)) {
  758. ; // spin
  759. }
  760. }
  761. ~scoped_spin_lock() {
  762. lock.clear(std::memory_order_release);
  763. }
  764. scoped_spin_lock(const scoped_spin_lock&) = delete;
  765. scoped_spin_lock& operator=(const scoped_spin_lock&) = delete;
  766. };
// One slot of the device buffer pool: a device pointer and its allocated size.
struct cuda_buffer {
    void * ptr = nullptr; // device pointer; nullptr marks the slot as free
    size_t size = 0;      // allocation size in bytes
};
// per-device pool of reusable device buffers, guarded by g_cuda_pool_lock
static cuda_buffer g_cuda_buffer_pool[GGML_CUDA_MAX_DEVICES][MAX_CUDA_BUFFERS];
static std::atomic_flag g_cuda_pool_lock = ATOMIC_FLAG_INIT;
  773. static void * ggml_cuda_pool_malloc(size_t size, size_t * actual_size) {
  774. scoped_spin_lock lock(g_cuda_pool_lock);
  775. int id;
  776. CUDA_CHECK(cudaGetDevice(&id));
  777. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  778. cuda_buffer& b = g_cuda_buffer_pool[id][i];
  779. if (b.size >= size && b.ptr != nullptr) {
  780. void * ptr = b.ptr;
  781. *actual_size = b.size;
  782. b.ptr = nullptr;
  783. b.size = 0;
  784. return ptr;
  785. }
  786. }
  787. void * ptr;
  788. CUDA_CHECK(cudaMalloc((void **) &ptr, size));
  789. *actual_size = size;
  790. return ptr;
  791. }
  792. static void ggml_cuda_pool_free(void * ptr, size_t size) {
  793. scoped_spin_lock lock(g_cuda_pool_lock);
  794. int id;
  795. CUDA_CHECK(cudaGetDevice(&id));
  796. for (int i = 0; i < MAX_CUDA_BUFFERS; ++i) {
  797. cuda_buffer& b = g_cuda_buffer_pool[id][i];
  798. if (b.ptr == nullptr) {
  799. b.ptr = ptr;
  800. b.size = size;
  801. return;
  802. }
  803. }
  804. fprintf(stderr, "WARNING: cuda buffer pool full, increase MAX_CUDA_BUFFERS\n");
  805. CUDA_CHECK(cudaFree(ptr));
  806. }
// VRAM scratch buffer for intermediate results and its bump-allocator state
static void * g_scratch_buffer = nullptr;
static size_t g_scratch_size = 1024*1024*1024; // 1 GB by default
static size_t g_scratch_offset = 0; // current bump offset into g_scratch_buffer
#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
#define GGML_CUDA_MAX_EVENTS 64
static int g_device_count = -1; // set by ggml_init_cublas
static int g_main_device = 0;   // device that holds full copies / gathers results
// normalized prefix sums of per-device VRAM; used as row-split boundaries
static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
// per-device compute streams, src1-copy streams, and copy-completion events
static cudaStream_t g_cudaStreams_main[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
static cudaStream_t g_cudaStreams_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_STREAMS] = { nullptr };
static cudaEvent_t g_cudaEvents_memcpy_src1[GGML_CUDA_MAX_DEVICES][GGML_CUDA_MAX_EVENTS] = { nullptr };
// One-time CUDA backend initialization (idempotent): counts devices, derives
// the default tensor split from each device's VRAM share, and creates the
// per-device streams, copy events, and cuBLAS handles.
// NOTE(review): the `initialized` guard is not thread-safe — confirm this is
// only called from a single thread during startup.
void ggml_init_cublas() {
    static bool initialized = false;
    if (!initialized) {
        CUDA_CHECK(cudaGetDeviceCount(&g_device_count));
        GGML_ASSERT(g_device_count <= GGML_CUDA_MAX_DEVICES);
        int64_t total_vram = 0;
        fprintf(stderr, "%s: found %d CUDA devices:\n", __func__, g_device_count);
        for (int id = 0; id < g_device_count; ++id) {
            cudaDeviceProp prop;
            CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
            fprintf(stderr, " Device %d: %s\n", id, prop.name);
            // first pass: store the VRAM sum of all devices before this one
            g_tensor_split[id] = total_vram;
            total_vram += prop.totalGlobalMem;
        }
        // second pass: normalize the prefix sums to fractions of total VRAM
        for (int id = 0; id < g_device_count; ++id) {
            g_tensor_split[id] /= total_vram;
        }
        for (int id = 0; id < g_device_count; ++id) {
            CUDA_CHECK(cudaSetDevice(id));
            // create streams (non-blocking so they don't sync with stream 0)
            for (int i = 0; i < GGML_CUDA_MAX_STREAMS; ++i) {
                CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_main[id][i], cudaStreamNonBlocking));
                CUDA_CHECK(cudaStreamCreateWithFlags(&g_cudaStreams_memcpy_src1[id][i], cudaStreamNonBlocking));
            }
            // create events (timing disabled: used only for ordering)
            for (int i = 0; i < GGML_CUDA_MAX_EVENTS; ++i) {
                CUDA_CHECK(cudaEventCreateWithFlags(&g_cudaEvents_memcpy_src1[id][i], cudaEventDisableTiming));
            }
            // create cublas handle
            CUBLAS_CHECK(cublasCreate(&g_cublas_handles[id]));
            CUBLAS_CHECK(cublasSetMathMode(g_cublas_handles[id], CUBLAS_TF32_TENSOR_OP_MATH));
        }
        // configure logging to stdout
        // CUBLAS_CHECK(cublasLoggerConfigure(1, 1, 0, nullptr));
        initialized = true;
    }
}
  856. void ggml_cuda_set_tensor_split(const float * tensor_split) {
  857. bool all_zero = true;
  858. for (int i = 0; i < g_device_count; ++i) {
  859. if (tensor_split[i] != 0.0f) {
  860. all_zero = false;
  861. break;
  862. }
  863. }
  864. if (all_zero) {
  865. return;
  866. }
  867. float split_sum = 0.0f;
  868. for (int i = 0; i < g_device_count; ++i) {
  869. g_tensor_split[i] = split_sum;
  870. split_sum += tensor_split[i];
  871. }
  872. for (int i = 0; i < g_device_count; ++i) {
  873. g_tensor_split[i] /= split_sum;
  874. }
  875. }
  876. void * ggml_cuda_host_malloc(size_t size) {
  877. if (getenv("GGML_CUDA_NO_PINNED") != nullptr) {
  878. return nullptr;
  879. }
  880. void * ptr = nullptr;
  881. cudaError_t err = cudaMallocHost((void **) &ptr, size);
  882. if (err != cudaSuccess) {
  883. fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
  884. size/1024.0/1024.0, cudaGetErrorString(err));
  885. return nullptr;
  886. }
  887. return ptr;
  888. }
// Frees pinned host memory obtained from ggml_cuda_host_malloc.
void ggml_cuda_host_free(void * ptr) {
    CUDA_CHECK(cudaFreeHost(ptr));
}
// Copies rows [i1_low, i1_high) of the (i3, i2) plane of host tensor `src`
// into contiguous device memory at `dst`, asynchronously on `stream`.
// Picks the cheapest copy shape the host layout allows:
//   1. rows fully contiguous       -> one cudaMemcpyAsync
//   2. contiguous elements per row -> one strided cudaMemcpy2DAsync
//   3. strided elements            -> one cudaMemcpy2DAsync per row
// Returns the first CUDA error encountered, or cudaSuccess.
static cudaError_t ggml_cuda_h2d_tensor_2d(
    void * dst, const struct ggml_tensor * src, int64_t i3, int64_t i2, int64_t i1_low, int64_t i1_high, cudaStream_t stream) {
    char * dst_char = (char *) dst;
    const int64_t ne0 = src->ne[0]; // elements per row
    const int64_t nb0 = src->nb[0]; // byte stride between elements
    const int64_t nb1 = src->nb[1]; // byte stride between rows
    const int64_t nb2 = src->nb[2]; // byte stride between dim-2 planes
    const int64_t nb3 = src->nb[3]; // byte stride between dim-3 planes
    const enum ggml_type type = src->type;
    const int64_t ts = ggml_type_size(type); // bytes per type block
    const int64_t bs = ggml_blck_size(type); // elements per type block
    int64_t i1_diff = i1_high - i1_low; // number of rows to copy
    // start of the requested rows within the (i3, i2) plane
    const void * x = (const void *) ((const char *) src->data + i1_low*nb1 + i2*nb2 + i3*nb3);
    if (nb0 == ts && nb1 == ts*ne0/bs) {
        // case 1: fully contiguous — single flat copy
        return cudaMemcpyAsync(dst_char, x, i1_diff*nb1, cudaMemcpyHostToDevice, stream);
    } else if (nb0 == ts) {
        // case 2: rows are contiguous internally but padded — strided 2D copy
        return cudaMemcpy2DAsync(dst_char, ts*ne0/bs, x, nb1, ts*ne0/bs, i1_diff, cudaMemcpyHostToDevice, stream);
    } else {
        // case 3: elements themselves are strided — copy one row at a time
        for (int64_t i1 = 0; i1 < i1_diff; i1++) {
            const void * rx = (const void *) ((const char *) x + i1*nb1);
            void * rd = (void *) (dst_char + i1*ts*ne0/bs);
            // pretend the row is a matrix with cols=1
            cudaError_t r = cudaMemcpy2DAsync(rd, ts/bs, rx, nb0, ts/bs, ne0, cudaMemcpyHostToDevice, stream);
            if (r != cudaSuccess) return r;
        }
        return cudaSuccess;
    }
}
  920. inline void ggml_cuda_op_add(
  921. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  922. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  923. cudaStream_t & cudaStream_main){
  924. GGML_ASSERT(src0_ddf_i != nullptr);
  925. GGML_ASSERT(src1_ddf_i != nullptr);
  926. GGML_ASSERT(dst_ddf_i != nullptr);
  927. const int64_t ne0 = src0->ne[0];
  928. const int64_t i01_diff = i01_high - i01_low;
  929. // compute
  930. add_f32_cuda(src0_ddf_i, src1_ddf_i, dst_ddf_i, ne0*i01_diff, cudaStream_main);
  931. CUDA_CHECK(cudaGetLastError());
  932. (void) src1;
  933. (void) dst;
  934. (void) src0_ddq_i;
  935. (void) i02;
  936. (void) i1;
  937. }
// ggml_cuda_op_t: elementwise fp32 mul with src1 broadcast across src0 rows.
// Launched one row at a time because the src1 row for a given src0 row is
// selected cyclically (i01 % ne11) rather than being laid out contiguously.
inline void ggml_cuda_op_mul(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){
    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);
    const int64_t ne00 = src0->ne[0]; // src0 row width
    const int64_t ne10 = src1->ne[0]; // src1 row width
    const int64_t ne11 = src1->ne[1]; // src1 row count (broadcast period)
    for (int64_t i01 = i01_low; i01 < i01_high; i01++) {
        const int64_t i11 = i1*ne11 + i01%ne11; // broadcast src1 across src0
        float * src0_ddf_i01 = src0_ddf_i + i01*ne00;
        float * src1_ddf_i01 = src1_ddf_i + i11*ne10;
        float * dst_ddf_i01 = dst_ddf_i + i01*ne00;
        // compute one row
        mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
        CUDA_CHECK(cudaGetLastError());
    }
    // unused parameters kept for the common ggml_cuda_op_t signature
    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
}
  961. inline void ggml_cuda_op_silu(
  962. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  963. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  964. cudaStream_t & cudaStream_main){
  965. GGML_ASSERT(src0_ddf_i != nullptr);
  966. GGML_ASSERT(dst_ddf_i != nullptr);
  967. const int64_t ne00 = src0->ne[0];
  968. const int64_t i01_diff = i01_high - i01_low;
  969. // compute
  970. silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
  971. CUDA_CHECK(cudaGetLastError());
  972. (void) src1;
  973. (void) dst;
  974. (void) src0_ddq_i;
  975. (void) src1_ddf_i;
  976. (void) i02;
  977. (void) i1;
  978. }
  979. inline void ggml_cuda_op_rms_norm(
  980. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  981. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  982. cudaStream_t & cudaStream_main){
  983. GGML_ASSERT(src0_ddf_i != nullptr);
  984. GGML_ASSERT(dst_ddf_i != nullptr);
  985. const int64_t ne00 = src0->ne[0];
  986. const int64_t i01_diff = i01_high - i01_low;
  987. // compute
  988. rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
  989. CUDA_CHECK(cudaGetLastError());
  990. (void) src1;
  991. (void) dst;
  992. (void) src0_ddq_i;
  993. (void) src1_ddf_i;
  994. (void) i02;
  995. (void) i1;
  996. }
  997. inline void ggml_cuda_op_dequantize_mul_mat_vec(
  998. const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
  999. float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
  1000. cudaStream_t & cudaStream_main){
  1001. GGML_ASSERT(src0_ddq_i != nullptr);
  1002. GGML_ASSERT(src1_ddf_i != nullptr);
  1003. GGML_ASSERT(dst_ddf_i != nullptr);
  1004. const int64_t ne00 = src0->ne[0];
  1005. const int64_t nrows = i01_high - i01_low;
  1006. switch (src0->type) {
  1007. case GGML_TYPE_Q4_0:
  1008. dequantize_mul_mat_vec_q4_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1009. break;
  1010. case GGML_TYPE_Q4_1:
  1011. dequantize_mul_mat_vec_q4_1_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1012. break;
  1013. case GGML_TYPE_Q5_0:
  1014. dequantize_mul_mat_vec_q5_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1015. break;
  1016. case GGML_TYPE_Q5_1:
  1017. dequantize_mul_mat_vec_q5_1_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1018. break;
  1019. case GGML_TYPE_Q8_0:
  1020. dequantize_mul_mat_vec_q8_0_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1021. break;
  1022. case GGML_TYPE_Q2_K:
  1023. dequantize_mul_mat_vec_q2_k_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1024. break;
  1025. case GGML_TYPE_Q3_K:
  1026. dequantize_mul_mat_vec_q3_k_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1027. break;
  1028. case GGML_TYPE_Q4_K:
  1029. dequantize_mul_mat_vec_q4_k_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1030. break;
  1031. case GGML_TYPE_Q5_K:
  1032. dequantize_mul_mat_vec_q5_k_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1033. break;
  1034. case GGML_TYPE_Q6_K:
  1035. dequantize_mul_mat_vec_q6_k_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1036. break;
  1037. case GGML_TYPE_F16:
  1038. convert_mul_mat_vec_f16_cuda(src0_ddq_i, src1_ddf_i, dst_ddf_i, ne00, nrows, cudaStream_main);
  1039. break;
  1040. default:
  1041. GGML_ASSERT(false);
  1042. break;
  1043. }
  1044. CUDA_CHECK(cudaGetLastError());
  1045. (void) src1;
  1046. (void) dst;
  1047. (void) src0_ddf_i;
  1048. (void) i02;
  1049. (void) i1;
  1050. }
// ggml_cuda_op_t: dst = src0 * src1 on the row slice [i01_low, i01_high)
// via cublasSgemm. ggml stores rows contiguously (row-major) while cuBLAS is
// column-major, hence CUBLAS_OP_T on the src0 operand.
inline void ggml_cuda_op_mul_mat_cublas(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){
    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(src1_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);
    // plain product: dst = 1*src0*src1 + 0*dst
    const float alpha = 1.0f;
    const float beta = 0.0f;
    const int64_t ne00 = src0->ne[0]; // shared (reduction) dimension from src0's side
    const int64_t ne10 = src1->ne[0]; // shared dimension from src1's side
    const int64_t ne11 = src1->ne[1]; // number of src1 columns in the result
    const int64_t ne0 = dst->ne[0];
    const int64_t i01_diff = i01_high - i01_low; // rows produced by this call
    int id;
    CUDA_CHECK(cudaGetDevice(&id));
    // the main device has a larger memory buffer to hold the results from all GPUs
    // ldc == nrows of the matrix that cuBLAS writes into
    int ldc = dst->backend == GGML_BACKEND_GPU && id == g_main_device ? ne0 : i01_diff;
    CUBLAS_CHECK(cublasSetStream(g_cublas_handles[id], cudaStream_main));
    CUBLAS_CHECK(
        cublasSgemm(g_cublas_handles[id], CUBLAS_OP_T, CUBLAS_OP_N,
            i01_diff, ne11, ne10,
            &alpha, src0_ddf_i, ne00,
            src1_ddf_i, ne10,
            &beta, dst_ddf_i, ldc));
    // unused parameters kept for the common ggml_cuda_op_t signature
    (void) dst;
    (void) src0_ddq_i;
    (void) i02;
    (void) i1;
}
// ggml_cuda_op_t: applies RoPE to the row slice [i01_low, i01_high).
// The rotation parameters (n_past, n_dims, mode) are packed as int32 values
// in src1->data; only mode 0 is supported here (asserted below).
inline void ggml_cuda_op_rope(
    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
    float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
    cudaStream_t & cudaStream_main){
    GGML_ASSERT(src0_ddf_i != nullptr);
    GGML_ASSERT(dst_ddf_i != nullptr);
    const int64_t ne00 = src0->ne[0];
    const int64_t i01_diff = i01_high - i01_low;
    const int n_past = ((int32_t *) src1->data)[0];
    const int n_dims = ((int32_t *) src1->data)[1];
    const int mode = ((int32_t *) src1->data)[2];
    GGML_ASSERT(mode == 0);
    // frequency base: theta_scale = 10000^(-2/n_dims)
    const float theta_scale = powf(10000.0, -2.0f/n_dims);
    // NOTE(review): with mode == 0 asserted above, the ternary always takes
    // the n_past + i02 branch; the (mode & 1) check is effectively dead here.
    const float p = ((mode & 1) == 0 ? n_past + i02 : i02);
    // compute
    rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
    CUDA_CHECK(cudaGetLastError());
    // unused parameters kept for the common ggml_cuda_op_t signature
    (void) dst;
    (void) src0_ddq_i;
    (void) src1_ddf_i;
    (void) i1;
}
  1104. static void ggml_cuda_op(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
  1105. ggml_cuda_op_t op, bool src0_needs_f32) {
  1106. const int64_t ne00 = src0->ne[0];
  1107. const int64_t ne01 = src0->ne[1];
  1108. const int64_t ne02 = src0->ne[2];
  1109. const int64_t ne03 = src0->ne[3];
  1110. const int64_t nrows0 = ggml_nrows(src0);
  1111. const bool use_src1 = src1 != nullptr;
  1112. const int64_t ne10 = use_src1 ? src1->ne[0] : 1;
  1113. const int64_t ne11 = use_src1 ? src1->ne[1] : 1;
  1114. const int64_t ne12 = use_src1 ? src1->ne[2] : 1;
  1115. const int64_t ne13 = use_src1 ? src1->ne[3] : 1;
  1116. const int64_t ne0 = dst->ne[0];
  1117. const int64_t ne1 = dst->ne[1];
  1118. const int nb2 = dst->nb[2];
  1119. const int nb3 = dst->nb[3];
  1120. GGML_ASSERT(dst->backend != GGML_BACKEND_GPU_SPLIT);
  1121. GGML_ASSERT(!use_src1 || src1->backend != GGML_BACKEND_GPU_SPLIT);
  1122. // strides for iteration over dims 3 and 2
  1123. const int64_t src0_stride = ne00 * ne01;
  1124. const int64_t src1_stride = ne10 * ne11;
  1125. const int64_t dst_stride = ne0 * ne1;
  1126. const int64_t num_iters = ne02 * ne03;
  1127. const size_t src0_ts = ggml_type_size(src0->type);
  1128. const size_t src0_bs = ggml_blck_size(src0->type);
  1129. struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
  1130. struct ggml_tensor_extra_gpu * src1_extra = use_src1 ? (ggml_tensor_extra_gpu *) src1->extra : nullptr;
  1131. struct ggml_tensor_extra_gpu * dst_extra = (ggml_tensor_extra_gpu *) dst->extra;
  1132. const bool src0_on_device = src0->backend == GGML_BACKEND_GPU || src0->backend == GGML_BACKEND_GPU_SPLIT;
  1133. const bool src0_is_f32 = src0->type == GGML_TYPE_F32;
  1134. const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
  1135. const to_fp32_cuda_t to_fp32_cuda = ggml_get_to_fp32_cuda(src0->type);
  1136. // dd = data device
  1137. char * src0_ddq[GGML_CUDA_MAX_DEVICES] = {nullptr}; // quantized
  1138. float * src0_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr}; // float
  1139. float * src1_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
  1140. float * dst_ddf[GGML_CUDA_MAX_DEVICES] = {nullptr};
  1141. // asq = actual size quantized, asf = actual size float
  1142. size_t src0_asq[GGML_CUDA_MAX_DEVICES] = {0};
  1143. size_t src0_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1144. size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1145. size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
  1146. for (int id = 0; id < g_device_count; ++id) {
  1147. if (!split && id != g_main_device) {
  1148. continue;
  1149. }
  1150. const bool src1_on_device = use_src1 && src1->backend == GGML_BACKEND_GPU && id == g_main_device;
  1151. const bool dst_on_device = dst->backend == GGML_BACKEND_GPU && id == g_main_device;
  1152. int64_t row_low, row_high;
  1153. if (split) {
  1154. row_low = id == 0 ? 0 : nrows0*g_tensor_split[id];
  1155. row_low -= row_low % GGML_CUDA_DMMV_Y;
  1156. row_high = id == g_device_count - 1 ? nrows0 : nrows0*g_tensor_split[id + 1];
  1157. row_high -= row_high % GGML_CUDA_DMMV_Y;
  1158. } else {
  1159. row_low = 0;
  1160. row_high = nrows0;
  1161. }
  1162. if (row_low == row_high) {
  1163. continue;
  1164. }
  1165. int64_t row_diff = row_high - row_low;
  1166. cudaSetDevice(id);
  1167. if (src0_on_device) {
  1168. if (src0_is_f32) {
  1169. src0_ddf[id] = (float *) src0_extra->data_device[id];
  1170. } else {
  1171. src0_ddq[id] = (char *) src0_extra->data_device[id];
  1172. }
  1173. } else {
  1174. if (src0_is_f32) {
  1175. src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
  1176. } else {
  1177. src0_ddq[id] = (char *) ggml_cuda_pool_malloc(row_diff*ne00 * src0_ts/src0_bs, &src0_asq[id]);
  1178. }
  1179. }
  1180. if (src0_needs_f32 && !src0_is_f32) {
  1181. src0_ddf[id] = (float *) ggml_cuda_pool_malloc(row_diff*ne00 * sizeof(float), &src0_asf[id]);
  1182. }
  1183. if (use_src1) {
  1184. if (src1_on_device) {
  1185. src1_ddf[id] = (float *) src1_extra->data_device[id];
  1186. } else {
  1187. src1_ddf[id] = (float *) ggml_cuda_pool_malloc(num_iters*src1_stride * sizeof(float), &src1_asf[id]);
  1188. }
  1189. }
  1190. if (dst_on_device) {
  1191. dst_ddf[id] = (float *) dst_extra->data_device[id];
  1192. } else {
  1193. size_t size_dst_ddf = split ? row_diff*ne1 * sizeof(float) : num_iters*dst_stride * sizeof(float);
  1194. dst_ddf[id] = (float *) ggml_cuda_pool_malloc(size_dst_ddf, &dst_asf[id]);
  1195. }
  1196. for (int64_t i03 = 0; i03 < ne03; i03++) {
  1197. const int64_t i13 = i03 % ne13;
  1198. for (int64_t i02 = 0; i02 < ne02; i02++) {
  1199. const int64_t i12 = i02 % ne12;
  1200. const int64_t i0 = i03*ne02 + i02;
  1201. const int64_t i0_offset_low = row_low/ne01;
  1202. const int64_t i0_offset_high = row_high/ne01;
  1203. int64_t i01_low = 0;
  1204. int64_t i01_high = ne01;
  1205. if (split) {
  1206. if (i0 < i0_offset_low || i0 > i0_offset_high) {
  1207. continue;
  1208. }
  1209. if (i0 == i0_offset_low) {
  1210. i01_low = row_low % ne01;
  1211. }
  1212. if (i0 == i0_offset_high) {
  1213. i01_high = row_high % ne01;
  1214. }
  1215. }
  1216. const int64_t i01_diff = i01_high - i01_low;
  1217. if (i01_diff == 0) {
  1218. continue;
  1219. }
  1220. const int64_t i11 = i13*ne12 + i12;
  1221. cudaStream_t cudaStream_main = g_cudaStreams_main[id][i0 % GGML_CUDA_MAX_STREAMS];
  1222. cudaStream_t cudaStream_memcpy_src1 = g_cudaStreams_memcpy_src1[id][i0 % GGML_CUDA_MAX_STREAMS];
  1223. cudaEvent_t cudaEvent_memcpy_src1 = g_cudaEvents_memcpy_src1[id][i0 % GGML_CUDA_MAX_EVENTS];
  1224. // for split tensors the data begins at i0 == i0_offset_low
  1225. char * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
  1226. float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
  1227. float * src1_ddf_i = src1_ddf[id] + i11*src1_stride;
  1228. float * dst_ddf_i = dst_ddf[id] + (i0 - i0_offset_low)*dst_stride;
  1229. // for split tensors the data pointer needs to be rounded down
  1230. // to the bin edge for i03, i02 bins beyond the first
  1231. if (i0 - i0_offset_low > 0) {
  1232. src0_ddq_i -= (row_low % ne01)*ne00 * src0_ts/src0_bs;
  1233. src0_ddf_i -= (row_low % ne01)*ne00;
  1234. }
  1235. if (i0 - i0_offset_low > 0) {
  1236. dst_ddf_i -= (row_low % ne0)*ne1;
  1237. }
  1238. // the main device memory buffer can be on VRAM scratch, with space for all partial results
  1239. // in that case an offset on dst_ddf_i is needed
  1240. if (dst->backend == GGML_BACKEND_GPU && id == g_main_device) {
  1241. dst_ddf_i += i01_low; // offset is 0 if no tensor split
  1242. }
  1243. // copy src0, src1 to device if necessary
  1244. if (use_src1) {
  1245. if (src1->backend == GGML_BACKEND_CPU) {
  1246. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src1_ddf_i, src1, i03, i02, 0, ne11, cudaStream_memcpy_src1));
  1247. } else if (src1->backend == GGML_BACKEND_GPU) {
  1248. if (id != g_main_device) {
  1249. float * src1_ddf_i_source = (float *) src1_extra->data_device[g_main_device];
  1250. src1_ddf_i_source += i11*src1_stride;
  1251. CUDA_CHECK(cudaMemcpyAsync(src1_ddf_i, src1_ddf_i_source, src1_stride*sizeof(float),
  1252. cudaMemcpyDeviceToDevice, cudaStream_memcpy_src1));
  1253. }
  1254. } else {
  1255. GGML_ASSERT(false);
  1256. }
  1257. }
  1258. CUDA_CHECK(cudaEventRecord(cudaEvent_memcpy_src1, cudaStream_memcpy_src1));
  1259. if (!src0_on_device) {
  1260. if (src0_is_f32) {
  1261. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src0_ddf_i, src0, i03, i02, i01_low, i01_high, cudaStream_main));
  1262. } else {
  1263. CUDA_CHECK(ggml_cuda_h2d_tensor_2d(src0_ddq_i, src0, i03, i02, i01_low, i01_high, cudaStream_main));
  1264. }
  1265. }
  1266. // convert src0 to f32 if it's necessary for the ggml_cuda_op
  1267. if (src0_needs_f32 && !src0_is_f32) {
  1268. to_fp32_cuda(src0_ddq_i, src0_ddf_i, i01_diff*ne00, cudaStream_main);
  1269. CUDA_CHECK(cudaGetLastError());
  1270. }
  1271. // wait with main stream until src1 memcpy is done
  1272. CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, cudaEvent_memcpy_src1, 0));
  1273. // do the computation
  1274. op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
  1275. // copy dst to host or other device if necessary
  1276. if (!dst_on_device) {
  1277. void * dst_off_device;
  1278. cudaMemcpyKind kind;
  1279. if (dst->backend == GGML_BACKEND_CPU) {
  1280. dst_off_device = dst->data;
  1281. kind = cudaMemcpyDeviceToHost;
  1282. } else if (dst->backend == GGML_BACKEND_GPU) {
  1283. dst_off_device = dst_extra->data_device[g_main_device];
  1284. kind = cudaMemcpyDeviceToDevice;
  1285. } else {
  1286. GGML_ASSERT(false);
  1287. }
  1288. if (split) {
  1289. // src0 = weight matrix is saved as a transposed matrix for better memory layout.
  1290. // dst is NOT transposed.
  1291. // The outputs of cuBLAS matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
  1292. // Instead they need to be copied to the correct slice in ne0 = dst row index.
  1293. // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
  1294. for (int64_t j = 0; j < ne1; ++j) {
  1295. float * dhf_dst_i = (float *) ((char *) dst_off_device + (j*ne0 + i01_low)*sizeof(float) + i02*nb2 + i03*nb3);
  1296. CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i + j*i01_diff, i01_diff*sizeof(float), kind, cudaStream_main));
  1297. }
  1298. } else {
  1299. float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
  1300. CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
  1301. }
  1302. }
  1303. }
  1304. }
  1305. }
  1306. // wait until each device is finished, then free their buffers
  1307. for (int id = 0; id < g_device_count; ++id) {
  1308. CUDA_CHECK(cudaSetDevice(id));
  1309. CUDA_CHECK(cudaDeviceSynchronize());
  1310. if (src0_asq[id] > 0) {
  1311. ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
  1312. }
  1313. if (src0_asf[id] > 0) {
  1314. ggml_cuda_pool_free(src0_ddf[id], src0_asf[id]);
  1315. }
  1316. if (src1_asf[id] > 0) {
  1317. ggml_cuda_pool_free(src1_ddf[id], src1_asf[id]);
  1318. }
  1319. if (dst_asf[id] > 0) {
  1320. ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
  1321. }
  1322. }
  1323. }
// Element-wise addition dst = src0 + src1 on the GPU via ggml_cuda_op.
// All three tensors must be F32; `true` requests the f32 view of src0 for the op.
void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_add, true);
}
// Element-wise multiplication dst = src0 * src1 on the GPU via ggml_cuda_op.
// All three tensors must be F32; `true` requests the f32 view of src0 for the op.
void ggml_cuda_mul(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul, true);
}
// SiLU activation dst = silu(src0) on the GPU via ggml_cuda_op.
// src0 and dst must be F32; src1 is unused by the op itself.
void ggml_cuda_silu(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_silu, true);
}
// RMS normalization of src0 into dst on the GPU via ggml_cuda_op.
// src0 and dst must be F32; src1 is unused by the op itself.
void ggml_cuda_rms_norm(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rms_norm, true);
}
  1340. bool ggml_cuda_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst) {
  1341. GGML_ASSERT(src0->backend != GGML_BACKEND_GPU);
  1342. const int64_t ne10 = src1->ne[0];
  1343. const int64_t ne0 = dst->ne[0];
  1344. const int64_t ne1 = dst->ne[1];
  1345. // if (strcmp(dst->name, "KQ") == 0 || strcmp(dst->name, "KQV") == 0) {
  1346. // fprintf(stderr, "(%ld, %ld, %ld, %ld) + (%ld, %ld, %ld, %ld) -> (%ld, %ld, %ld, %ld)\n",
  1347. // src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3],
  1348. // src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
  1349. // dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3]);
  1350. // return false;
  1351. // }
  1352. // TODO: find the optimal values for these
  1353. if ((src0->type == GGML_TYPE_F32 || src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) &&
  1354. src1->type == GGML_TYPE_F32 &&
  1355. dst->type == GGML_TYPE_F32 &&
  1356. (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
  1357. return true;
  1358. }
  1359. return false;
  1360. }
  1361. void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  1362. if (src0->type == GGML_TYPE_F32) {
  1363. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true);
  1364. } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
  1365. if (src1->ne[1] == 1) {
  1366. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false);
  1367. } else {
  1368. ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true);
  1369. }
  1370. } else {
  1371. GGML_ASSERT(false);
  1372. }
  1373. }
// Rotary position embedding (RoPE) applied to src0, written to dst, via ggml_cuda_op.
// src0 and dst must be F32; src1 is unused by the op itself.
void ggml_cuda_rope(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
    GGML_ASSERT(src0->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);
    ggml_cuda_op(src0, src1, dst, ggml_cuda_op_rope, true);
}
  1378. void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
  1379. (void) src0;
  1380. (void) src1;
  1381. (void) dst;
  1382. }
  1383. void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset) {
  1384. FILE * fp = fopen(fname, "rb");
  1385. int nrows = ggml_nrows(tensor);
  1386. const size_t nb1 = tensor->nb[1];
  1387. ggml_backend backend = tensor->backend;
  1388. struct ggml_tensor_extra_gpu * extra = new struct ggml_tensor_extra_gpu;
  1389. for (int id = 0; id < g_device_count; ++id) {
  1390. extra->data_device[id] = nullptr;
  1391. if (backend == GGML_BACKEND_GPU && id != g_main_device) {
  1392. continue;
  1393. }
  1394. cudaSetDevice(id);
  1395. int row_low, row_high;
  1396. if (backend == GGML_BACKEND_GPU) {
  1397. row_low = 0;
  1398. row_high = nrows;
  1399. } else if (backend == GGML_BACKEND_GPU_SPLIT) {
  1400. row_low = id == 0 ? 0 : nrows*g_tensor_split[id];
  1401. row_low -= row_low % GGML_CUDA_DMMV_Y;
  1402. row_high = id == g_device_count - 1 ? nrows : nrows*g_tensor_split[id + 1];
  1403. row_high -= row_high % GGML_CUDA_DMMV_Y;
  1404. } else {
  1405. GGML_ASSERT(false);
  1406. }
  1407. if (row_low == row_high) {
  1408. continue;
  1409. }
  1410. int64_t nrows_split = row_high - row_low;
  1411. const size_t offset_split = offset + row_low*nb1;
  1412. const size_t size = ggml_nbytes_split(tensor, nrows_split);
  1413. void * buf;
  1414. CUDA_CHECK(cudaMalloc(&buf, size));
  1415. void * buf_host = malloc(size);
  1416. #ifdef _WIN32
  1417. int ret = _fseeki64(fp, (__int64) offset_split, SEEK_SET);
  1418. #else
  1419. int ret = fseek(fp, (long) offset_split, SEEK_SET);
  1420. #endif
  1421. GGML_ASSERT(ret == 0); // same
  1422. size_t ret2 = fread(buf_host, size, 1, fp);
  1423. if (ret2 != 1) {
  1424. fprintf(stderr, "unexpectedly reached end of file");
  1425. exit(1);
  1426. }
  1427. cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);
  1428. cudaDeviceSynchronize();
  1429. free(buf_host);
  1430. extra->data_device[id] = buf;
  1431. }
  1432. tensor->extra = extra;
  1433. fclose(fp);
  1434. }
  1435. void ggml_cuda_free_data(struct ggml_tensor * tensor) {
  1436. if (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) {
  1437. return;
  1438. }
  1439. ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
  1440. for (int id = 0; id < g_device_count; ++id) {
  1441. if (extra->data_device[id] == nullptr) {
  1442. continue;
  1443. }
  1444. CUDA_CHECK(cudaSetDevice(id));
  1445. CUDA_CHECK(cudaFree(extra->data_device[id]));
  1446. }
  1447. delete extra;
  1448. }
  1449. void ggml_cuda_assign_buffers(struct ggml_tensor * tensor) {
  1450. if (tensor->src0 != nullptr && tensor->src0->op == GGML_OP_RESHAPE) {
  1451. ggml_cuda_assign_buffers(tensor);
  1452. }
  1453. const size_t size = ggml_nbytes(tensor);
  1454. GGML_ASSERT(size <= g_scratch_size);
  1455. if (g_scratch_offset + size > g_scratch_size) {
  1456. g_scratch_offset = 0;
  1457. }
  1458. tensor->backend = GGML_BACKEND_GPU;
  1459. struct ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu;
  1460. bool inplace = tensor->src0 != nullptr && tensor->src0->data == tensor->data;
  1461. CUDA_CHECK(cudaSetDevice(g_main_device));
  1462. if (inplace && tensor->src0->backend == GGML_BACKEND_GPU) {
  1463. struct ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu * ) tensor->src0->extra;
  1464. extra->data_device[g_main_device] = src0_extra->data_device;
  1465. GGML_ASSERT(false);
  1466. } else {
  1467. char * data = (char *) g_scratch_buffer;
  1468. if (data == nullptr) {
  1469. CUDA_CHECK(cudaMalloc(&data, g_scratch_size));
  1470. g_scratch_buffer = data;
  1471. }
  1472. extra->data_device[g_main_device] = data + g_scratch_offset;
  1473. }
  1474. // fprintf(stderr, "data=%p offset=%ld data_device=%p\n", data, g_scratch_offset, extra->data_device[0]);
  1475. g_scratch_offset += size;
  1476. // fprintf(stderr, "%s: scratch %d, %p - %p\n",
  1477. // tensor->name, g_scratch_index, data + g_scratch_offset, data + g_scratch_offset + size);
  1478. GGML_ASSERT(g_scratch_offset <= g_scratch_size);
  1479. tensor->extra = extra;
  1480. }
  1481. void ggml_cuda_set_main_device(int main_device) {
  1482. if (main_device > g_device_count) {
  1483. fprintf(stderr, "warning: cannot set main_device=%d because there are only %d devices. Using device %d instead.\n",
  1484. main_device, g_device_count, g_main_device);
  1485. return;
  1486. }
  1487. g_main_device = main_device;
  1488. if (g_device_count > 1) {
  1489. cudaDeviceProp prop;
  1490. CUDA_CHECK(cudaGetDeviceProperties(&prop, g_main_device));
  1491. fprintf(stderr, "%s: using device %d (%s) as main device\n", __func__, g_main_device, prop.name);
  1492. }
  1493. }
// Set the size in bytes of the VRAM scratch buffer used by ggml_cuda_assign_buffers.
// Only takes effect before the buffer is first allocated; it does not resize
// an already-allocated g_scratch_buffer.
void ggml_cuda_set_scratch_size(size_t scratch_size) {
    g_scratch_size = scratch_size;
}
  1497. bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
  1498. ggml_cuda_func_t func;
  1499. const bool any_on_device = tensor->backend == GGML_BACKEND_GPU
  1500. || tensor->src0->backend == GGML_BACKEND_GPU || tensor->src0->backend == GGML_BACKEND_GPU_SPLIT
  1501. || (tensor->src1 != nullptr && tensor->src1->backend == GGML_BACKEND_GPU);
  1502. switch (tensor->op) {
  1503. case GGML_OP_ADD:
  1504. if (!any_on_device) {
  1505. return false;
  1506. }
  1507. func = ggml_cuda_add;
  1508. break;
  1509. case GGML_OP_MUL:
  1510. if (!any_on_device) {
  1511. return false;
  1512. }
  1513. func = ggml_cuda_mul;
  1514. break;
  1515. case GGML_OP_SILU:
  1516. if (!any_on_device) {
  1517. return false;
  1518. }
  1519. func = ggml_cuda_silu;
  1520. break;
  1521. case GGML_OP_RMS_NORM:
  1522. if (!any_on_device) {
  1523. return false;
  1524. }
  1525. func = ggml_cuda_rms_norm;
  1526. break;
  1527. case GGML_OP_MUL_MAT:
  1528. if (!any_on_device && !ggml_cuda_can_mul_mat(tensor->src0, tensor->src1, tensor)) {
  1529. return false;
  1530. }
  1531. func = ggml_cuda_mul_mat;
  1532. break;
  1533. case GGML_OP_RESHAPE:
  1534. if (!any_on_device) {
  1535. return false;
  1536. }
  1537. func = ggml_cuda_nop;
  1538. break;
  1539. case GGML_OP_ROPE:
  1540. if (!any_on_device) {
  1541. return false;
  1542. }
  1543. func = ggml_cuda_rope;
  1544. break;
  1545. default:
  1546. return false;
  1547. }
  1548. if (params->ith != 0) {
  1549. return true;
  1550. }
  1551. if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
  1552. return true;
  1553. }
  1554. func(tensor->src0, tensor->src1, tensor);
  1555. return true;
  1556. }