ggml-metal.metal

#include <metal_stdlib>

using namespace metal;

#define MAX(x, y) ((x) > (y) ? (x) : (y))

#define QK4_0 32
#define QR4_0 2
typedef struct {
    half    d;             // delta
    uint8_t qs[QK4_0 / 2]; // nibbles / quants
} block_q4_0;
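// reference dequantization (a sketch for orientation; mirrors ggml's q4_0 layout):
//   for j in [0, QK4_0/2):
//     y[j]           = d * ((qs[j] & 0x0F) - 8)
//     y[j + QK4_0/2] = d * ((qs[j] >>  4) - 8)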
#define QK4_1 32
typedef struct {
    half    d;             // delta
    half    m;             // min
    uint8_t qs[QK4_1 / 2]; // nibbles / quants
} block_q4_1;
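// reference dequantization (same nibble layout as q4_0, plus a per-block min):
//   y[j]           = d * (qs[j] & 0x0F) + m
//   y[j + QK4_1/2] = d * (qs[j] >>  4) + m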
#define QK8_0 32
typedef struct {
    half   d;         // delta
    int8_t qs[QK8_0]; // quants
} block_q8_0;
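// reference dequantization: y[j] = d * qs[j]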
kernel void kernel_add(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] + src1[tpig];
}

// assumption: src1 is a row
// broadcast src1 into src0
kernel void kernel_add_row(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] + src1[tpig % ne00];
}

kernel void kernel_mul(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * src1[tpig];
}

// assumption: src1 is a row
// broadcast src1 into src0
kernel void kernel_mul_row(
        device const float * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * src1[tpig % ne00];
}

kernel void kernel_scale(
        device const float * src0,
        device       float * dst,
        constant     float & scale,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = src0[tpig] * scale;
}

kernel void kernel_silu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    float x = src0[tpig];
    dst[tpig] = x / (1.0f + exp(-x));
}

kernel void kernel_relu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    dst[tpig] = max(0.0f, src0[tpig]);
}

constant float GELU_COEF_A    = 0.044715f;
constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
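// tanh-based GELU approximation:
//   gelu(x) = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
// SQRT_2_OVER_PI is sqrt(2/pi) and GELU_COEF_A is the cubic coefficient.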
kernel void kernel_gelu(
        device const float * src0,
        device       float * dst,
        uint tpig[[thread_position_in_grid]]) {
    float x = src0[tpig];

    // BEWARE !!!
    // Simply using "tanh" instead of "precise::tanh" will sometimes result in NaNs!
    // This was observed with the Falcon 7B and 40B models
    //
    dst[tpig] = 0.5f*x*(1.0f + precise::tanh(SQRT_2_OVER_PI*x*(1.0f + GELU_COEF_A*x*x)));
}
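// numerically stable softmax along each row:
//   dst[i] = exp(src[i] - max(src)) / sum_j exp(src[j] - max(src))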
kernel void kernel_soft_max(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        threadgroup float  * buf [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    device const float * psrc0 = src0 + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
    device       float * pdst  = dst  + i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    // parallel max
    buf[tpitg[0]] = -INFINITY;
    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        buf[tpitg[0]] = MAX(buf[tpitg[0]], psrc0[i00]);
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg[0]/2; i > 0; i /= 2) {
        if (tpitg[0] < i) {
            buf[tpitg[0]] = MAX(buf[tpitg[0]], buf[tpitg[0] + i]);
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg[0] == 0) {
        buf[0] = buf[0];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float max = buf[0];

    // parallel sum
    buf[tpitg[0]] = 0.0f;
    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        buf[tpitg[0]] += exp(psrc0[i00] - max);
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg[0]/2; i > 0; i /= 2) {
        if (tpitg[0] < i) {
            buf[tpitg[0]] += buf[tpitg[0] + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg[0] == 0) {
        buf[0] = buf[0];
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float sum = buf[0];

    for (int i00 = tpitg[0]; i00 < ne00; i00 += ntg[0]) {
        pdst[i00] = exp(psrc0[i00] - max) / sum;
    }
}

kernel void kernel_diag_mask_inf(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant       int & n_past,
        uint3 tpig[[thread_position_in_grid]]) {
    const int64_t i02 = tpig[2];
    const int64_t i01 = tpig[1];
    const int64_t i00 = tpig[0];

    if (i00 > n_past + i01) {
        dst[i02*ne01*ne00 + i01*ne00 + i00] = -INFINITY;
    } else {
        dst[i02*ne01*ne00 + i01*ne00 + i00] = src0[i02*ne01*ne00 + i01*ne00 + i00];
    }
}
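// layer normalization over each row:
//   y = (x - mean(x)) / sqrt(var(x) + eps)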
kernel void kernel_norm(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant     float & eps,
        threadgroup float  * sum [[threadgroup(0)]],
        uint tgpig[[threadgroup_position_in_grid]],
        uint tpitg[[thread_position_in_threadgroup]],
        uint   ntg[[threads_per_threadgroup]]) {
    device const float * x = (device const float *) ((device const char *) src0 + tgpig*nb01);

    // MEAN
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += x[i00];
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float mean = sum[0];

    // recenter
    device float * y = dst + tgpig*ne00;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = x[i00] - mean;
    }

    // VARIANCE
    // parallel sum
    sum[tpitg] = 0.0f;
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        sum[tpitg] += y[i00] * y[i00];
    }

    // reduce
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = ntg/2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    // broadcast
    if (tpitg == 0) {
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float variance = sum[0];

    const float scale = 1.0f/sqrt(variance + eps);
    for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
        y[i00] = y[i00] * scale;
    }
}
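// RMS normalization over each row:
//   y = x / sqrt(mean(x^2) + eps)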
kernel void kernel_rms_norm(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant     float & eps,
        threadgroup float  * sum [[threadgroup(0)]],
        uint tgpig[[threadgroup_position_in_grid]],
        uint tpitg[[thread_position_in_threadgroup]],
        uint sgitg[[simdgroup_index_in_threadgroup]],
        uint tiisg[[thread_index_in_simdgroup]],
        uint   ntg[[threads_per_threadgroup]]) {
    device const float4 * x = (device const float4 *) ((device const char *) src0 + tgpig*nb01);
    device const float * x_scalar = (device const float *) x;
    float4 sumf = 0;
    float all_sum = 0;

    // parallel sum of squares, 4 elements at a time
    for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
        sumf += x[i00] * x[i00];
    }
    all_sum = sumf[0] + sumf[1] + sumf[2] + sumf[3];
    all_sum = simd_sum(all_sum);
    if (tiisg == 0) {
        sum[sgitg] = all_sum;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    // reduce across SIMD groups; the SIMD group count is ntg / 32
    for (uint i = ntg / 32 / 2; i > 0; i /= 2) {
        if (tpitg < i) {
            sum[tpitg] += sum[tpitg + i];
        }
    }
    if (tpitg == 0) {
        // fold in the leftover elements when ne00 is not a multiple of 4
        for (int i = 4 * (ne00 / 4); i < ne00; i++) {
            sum[0] += x_scalar[i] * x_scalar[i];
        }
        sum[0] /= ne00;
    }
    threadgroup_barrier(mem_flags::mem_threadgroup);

    const float mean  = sum[0];
    const float scale = 1.0f/sqrt(mean + eps);

    device float4 * y = (device float4 *) (dst + tgpig*ne00);
    device float  * y_scalar = (device float *) y;
    for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
        y[i00] = x[i00] * scale;
    }
    if (tpitg == 0) {
        for (int i00 = 4 * (ne00 / 4); i00 < ne00; i00++) {
            y_scalar[i00] = x_scalar[i00] * scale;
        }
    }
}
// function to calculate the inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_0/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) {
    float d = qb_curr->d;
    float2 acc = 0.f;
    device const uint16_t * qs = ((device const uint16_t *)qb_curr + 1 + il/2);
    for (int i = 0; i < 8; i += 2) {
        acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
                + yl[i + 1] * (qs[i / 2] & 0x0F00);
        acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
                + yl[i + 9] * (qs[i / 2] & 0xF000);
    }
    return d * (sumy * -8.f + acc[0] + acc[1]);
}
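// how the shift trick works: the mask 0x0F00 keeps the nibble in place as q << 8,
// and the caller pre-divided yl[i + 1] by 256 (see mul_vec_q_n_f32 below), so
// yl[i + 1] * (qs[i/2] & 0x0F00) equals yl * q with no explicit shift. Since q4_0
// stores x = d * (q - 8), the dot product is d * (sum(q*y) - 8*sum(y)), which is
// the "sumy * -8.f" term above.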
// function to calculate the inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i])
// il indicates where the q4 quants begin (0 or QK4_1/4)
// we assume that the yl's have been multiplied by the appropriate scale factor
// that corresponds to the missing bit shifts (1, 1/16, 1/256, 1/4096)
inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) {
    float d = qb_curr->d;
    float m = qb_curr->m;
    device const uint16_t * qs = ((device const uint16_t *)qb_curr + 2 + il/2);
    float2 acc = 0.f;
    for (int i = 0; i < 8; i += 2) {
        acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F)
                + yl[i + 1] * (qs[i / 2] & 0x0F00);
        acc[1] += yl[i + 8] * (qs[i / 2] & 0x00F0)
                + yl[i + 9] * (qs[i / 2] & 0xF000);
    }
    return d * (acc[0] + acc[1]) + sumy * m;
}
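// for q4_1 the stored value is x = d * q + m, so the inner product is
// d * sum(q*y) + m * sum(y): the per-block min contributes m * sumy.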
// putting them in the kernel causes a significant performance penalty
#define N_DST 4        // each SIMD group works on 4 rows
#define N_SIMDGROUP 2  // number of SIMD groups in a thread group
#define N_SIMDWIDTH 32 // assuming SIMD group size is 32

// Note: This is a template, but strictly speaking it only applies to
//       quantizations where the block size is 32. It also does not
//       guard against the number of rows not being divisible by
//       N_DST, so this is another explicit assumption of the implementation.
template<typename block_q_type, int nr, int nsg, int nw>
void mul_vec_q_n_f32(device const void * src0, device const float * src1, device float * dst,
                     int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint gqa,
                     uint3 tgpig, uint tiisg, uint sgitg) {
    const int nb = ne00/QK4_0;
    const int r0 = tgpig.x;
    const int r1 = tgpig.y;
    const int im = tgpig.z;
    const int first_row = (r0 * nsg + sgitg) * nr;
    const uint offset0 = first_row * nb + im/gqa*(nb*ne0);
    device const block_q_type * x = (device const block_q_type *) src0 + offset0;
    device const float        * y = (device const float        *) src1 + r1*ne10 + im*ne00*ne1;
    float yl[16]; // src1 vector cache
    float sumf[nr] = {0.f};

    const int ix = tiisg/2;
    const int il = 8*(tiisg%2);

    device const float * yb = y + ix * QK4_0 + il;

    // each thread in a SIMD group deals with half a block.
    for (int ib = ix; ib < nb; ib += nw/2) {
        float sumy = 0;
        for (int i = 0; i < 8; i += 2) {
            sumy += yb[i] + yb[i+1];
            yl[i+0] = yb[i+ 0];
            yl[i+1] = yb[i+ 1]/256.f;
            sumy += yb[i+16] + yb[i+17];
            yl[i+8] = yb[i+16]/16.f;
            yl[i+9] = yb[i+17]/4096.f;
        }

        for (int row = 0; row < nr; row++) {
            sumf[row] += block_q_n_dot_y(x+ib+row*nb, sumy, yl, il);
        }

        yb += QK4_0 * 16;
    }

    for (int row = 0; row < nr; ++row) {
        const float tot = simd_sum(sumf[row]);
        if (tiisg == 0 && first_row + row < ne01) {
            dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
        }
    }
}

kernel void kernel_mul_mat_q4_0_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    mul_vec_q_n_f32<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, gqa, tgpig, tiisg, sgitg);
}

kernel void kernel_mul_mat_q4_1_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    mul_vec_q_n_f32<block_q4_1, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, gqa, tgpig, tiisg, sgitg);
}

kernel void kernel_mul_mat_q8_0_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const int nr  = N_DST;
    const int nsg = N_SIMDGROUP;
    const int nw  = N_SIMDWIDTH;

    const int nb = ne00/QK8_0;
    const int r0 = tgpig.x;
    const int r1 = tgpig.y;
    const int im = tgpig.z;
    const int first_row = (r0 * nsg + sgitg) * nr;
    const uint offset0 = first_row * nb + im/gqa*(nb*ne0);
    device const block_q8_0 * x = (device const block_q8_0 *) src0 + offset0;
    device const float      * y = (device const float      *) src1 + r1*ne10 + im*ne00*ne1;

    float yl[16];
    float sumf[nr] = {0.f};

    const int ix = tiisg/2;
    const int il = tiisg%2;

    device const float * yb = y + ix * QK8_0 + 16*il;

    // each thread in a SIMD group deals with half a block.
    for (int ib = ix; ib < nb; ib += nw/2) {
        for (int i = 0; i < 16; ++i) {
            yl[i] = yb[i];
        }

        for (int row = 0; row < nr; row++) {
            device const int8_t * qs = x[ib+row*nb].qs + 16*il;
            float sumq = 0.f;
            for (int iq = 0; iq < 16; ++iq) {
                sumq += qs[iq] * yl[iq];
            }
            sumf[row] += sumq*x[ib+row*nb].d;
        }

        yb += QK8_0 * 16;
    }

    for (int row = 0; row < nr; ++row) {
        const float tot = simd_sum(sumf[row]);
        if (tiisg == 0 && first_row + row < ne01) {
            dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
        }
    }
}

kernel void kernel_mul_mat_f16_f32(
        device const  char * src0,
        device const  char * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant   int64_t & ne10,
        constant   int64_t & ne11,
        constant   int64_t & ne12,
        constant  uint64_t & nb10,
        constant  uint64_t & nb11,
        constant  uint64_t & nb12,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        threadgroup float  * sum [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3  tpig[[thread_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3  tptg[[threads_per_threadgroup]]) {
    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int64_t im = tgpig.z;

    device const half  * x = (device const half  *) (src0 + r0*nb01 + im/(ne12/ne02)*nb02);
    device const float * y = (device const float *) (src1 + r1*nb11 + im*nb12);

    sum[tpitg.x] = 0.0f;

    for (int i = tpitg.x; i < ne00; i += tptg.x) {
        sum[tpitg.x] += (float) x[i] * (float) y[i];
    }

    // accumulate the sum from all threads in the threadgroup
    threadgroup_barrier(mem_flags::mem_threadgroup);
    for (uint i = tptg.x/2; i > 0; i /= 2) {
        if (tpitg.x < i) {
            sum[tpitg.x] += sum[tpitg.x + i];
        }
        threadgroup_barrier(mem_flags::mem_threadgroup);
    }

    if (tpitg.x == 0) {
        dst[im*ne1*ne0 + r1*ne0 + r0] = sum[0];
    }
}
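// ALiBi (Attention with Linear Biases): adds a linear bias m_k * (i00 - ne00 + 1)
// to each row of the attention scores, with slope m_k = pow(m0, i2 + 1) taken
// from the second tensor dimension (typically the attention head).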
kernel void kernel_alibi_f32(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        constant     float & m0,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);
    float m_k = pow(m0, i2 + 1);
    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0] + m_k * (i00 - ne00 + 1);
    }
}
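// rotary position embedding (RoPE)
// the angle for dimension pair i is theta_i = p * freq_scale * freq_base^(-2i/n_dims),
// where p is the token position; mode bit 0 set means positions start at 0 instead
// of n_past, and mode bit 1 selects the NeoX layout, which rotates pairs separated
// by n_dims/2 instead of adjacent pairs.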
kernel void kernel_rope(
        device const  void * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        constant       int & n_past,
        constant       int & n_dims,
        constant       int & mode,
        constant     float & freq_base,
        constant     float & freq_scale,
        uint3 tpig[[thread_position_in_grid]]) {
    const int64_t i3 = tpig[2];
    const int64_t i2 = tpig[1];
    const int64_t i1 = tpig[0];

    const bool is_neox = mode & 2;
    const float theta_scale = pow(freq_base, -2.0f/n_dims);

    const int64_t p = ((mode & 1) == 0 ? n_past + i2 : i2);

    float theta = freq_scale * (float)p;

    if (!is_neox) {
        for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
            const float cos_theta = cos(theta);
            const float sin_theta = sin(theta);

            theta *= theta_scale;

            device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
            device       float * dst_data  = (device float *)((device char *)  dst + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

            const float x0 = src[0];
            const float x1 = src[1];

            dst_data[0] = x0*cos_theta - x1*sin_theta;
            dst_data[1] = x0*sin_theta + x1*cos_theta;
        }
    } else {
        for (int64_t ib = 0; ib < ne0/n_dims; ++ib) {
            for (int64_t ic = 0; ic < n_dims; ic += 2) {
                const float cos_theta = cos(theta);
                const float sin_theta = sin(theta);

                theta *= theta_scale;

                const int64_t i0 = ib*n_dims + ic/2;

                device const float * const src = (device float *)((device char *) src0 + i3*nb03 + i2*nb02 + i1*nb01 + i0*nb00);
                device       float * dst_data  = (device float *)((device char *)  dst + i3*nb3  + i2*nb2  + i1*nb1  + i0*nb0);

                const float x0 = src[0];
                const float x1 = src[n_dims/2];

                dst_data[0]        = x0*cos_theta - x1*sin_theta;
                dst_data[n_dims/2] = x0*sin_theta + x1*cos_theta;
            }
        }
    }
}
kernel void kernel_cpy_f16_f16(
        device const  half * src0,
        device        half * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const half * src = (device half *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0];
    }
}

kernel void kernel_cpy_f32_f16(
        device const float * src0,
        device        half * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device half * dst_data = (device half *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0];
    }
}

kernel void kernel_cpy_f32_f32(
        device const float * src0,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01,
        constant   int64_t & ne02,
        constant   int64_t & ne03,
        constant  uint64_t & nb00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb02,
        constant  uint64_t & nb03,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant   int64_t & ne2,
        constant   int64_t & ne3,
        constant  uint64_t & nb0,
        constant  uint64_t & nb1,
        constant  uint64_t & nb2,
        constant  uint64_t & nb3,
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint3 tpitg[[thread_position_in_threadgroup]],
        uint3   ntg[[threads_per_threadgroup]]) {
    const int64_t i03 = tgpig[2];
    const int64_t i02 = tgpig[1];
    const int64_t i01 = tgpig[0];

    const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;

    const int64_t i3 = n / (ne2*ne1*ne0);
    const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
    const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
    const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);

    device float * dst_data = (device float *) ((device char *) dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0);

    for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
        device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);
        dst_data[i00] = src[0];
    }
}

//============================================ k-quants ======================================================

#ifndef QK_K
#define QK_K 256
#else
static_assert(QK_K == 256 || QK_K == 64, "QK_K must be 256 or 64");
#endif

#if QK_K == 256
#define K_SCALE_SIZE 12
#else
#define K_SCALE_SIZE 4
#endif

typedef struct {
    uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
    uint8_t qs[QK_K/4];      // quants
    half d;                  // super-block scale for quantized scales
    half dmin;               // super-block scale for quantized mins
} block_q2_K;
// 84 bytes / block

typedef struct {
    uint8_t hmask[QK_K/8]; // quants - high bit
    uint8_t qs[QK_K/4];    // quants - low 2 bits
#if QK_K == 64
    uint8_t scales[2];
#else
    uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits
#endif
    half d; // super-block scale
} block_q3_K;
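// 110 bytes / block when QK_K == 256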
#if QK_K == 64
typedef struct {
    half    d[2];       // super-block scales/mins
    uint8_t scales[2];
    uint8_t qs[QK_K/2]; // 4-bit quants
} block_q4_K;
#else
typedef struct {
    half d;                       // super-block scale for quantized scales
    half dmin;                    // super-block scale for quantized mins
    uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits
    uint8_t qs[QK_K/2];           // 4-bit quants
} block_q4_K;
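// 144 bytes / block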
#endif

#if QK_K == 64
typedef struct {
    half    d;               // super-block scales/mins
    int8_t  scales[QK_K/16]; // 8-bit block scales
    uint8_t qh[QK_K/8];      // quants, high bit
    uint8_t qs[QK_K/2];      // quants, low 4 bits
} block_q5_K;
#else
typedef struct {
    half d;                    // super-block scale for quantized scales
    half dmin;                 // super-block scale for quantized mins
    uint8_t scales[3*QK_K/64]; // scales and mins, quantized with 6 bits
    uint8_t qh[QK_K/8];        // quants, high bit
    uint8_t qs[QK_K/2];        // quants, low 4 bits
} block_q5_K;
// 176 bytes / block
#endif

typedef struct {
    uint8_t ql[QK_K/2];      // quants, lower 4 bits
    uint8_t qh[QK_K/4];      // quants, upper 2 bits
    int8_t  scales[QK_K/16]; // scales, quantized with 8 bits
    half d;                  // super-block scale
} block_q6_K;
// 210 bytes / block
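// extracts the 6-bit scale/min pairs for sub-blocks j and j+1 from the 12-byte
// packed scale array used by the K-quants (8 scales and 8 mins, 6 bits each);
// the returned uchar4 is (scale_j, min_j, scale_j+1, min_j+1)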
static inline uchar4 get_scale_min_k4(int j, device const uint8_t * q) {
    uchar4 r;
    if (j < 4) {
        r[0] = q[j+0] & 63;
        r[2] = q[j+1] & 63;
        r[1] = q[j+4] & 63;
        r[3] = q[j+5] & 63;
    } else {
        r[0] = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
        r[2] = (q[j+5] & 0xF) | ((q[j-3] >> 6) << 4);
        r[1] = (q[j+4] >>  4) | ((q[j-0] >> 6) << 4);
        r[3] = (q[j+5] >>  4) | ((q[j+1] >> 6) << 4);
    }
    return r;
}
//====================================== dot products =========================

kernel void kernel_mul_mat_q2_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const int nb = ne00/QK_K;
    const int r0 = tgpig.x;
    const int r1 = tgpig.y;
    const int r2 = tgpig.z;

    const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
    const int ib_row = first_row * nb;
    const uint offset0 = r2/gqa*(nb*ne0);
    device const block_q2_K * x = (device const block_q2_K *) src0 + ib_row + offset0;
    device const float      * y = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;
    float yl[32];
    float sumf[N_DST] = {0.f}, all_sum;

    const int step = sizeof(block_q2_K) * nb;

#if QK_K == 256
    const int ix = tiisg/8;   // 0...3
    const int it = tiisg%8;   // 0...7
    const int im = it/4;      // 0 or 1
    const int ir = it%4;      // 0...3
    const int is = (8*ir)/16; // 0 or 1

    device const float * y4 = y + ix * QK_K + 128 * im + 8 * ir;

    for (int ib = ix; ib < nb; ib += 4) {
        float4 sumy = {0.f, 0.f, 0.f, 0.f};
        for (int i = 0; i < 8; ++i) {
            yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
            yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8];
            yl[i+16] = y4[i+64]; sumy[2] += yl[i+16];
            yl[i+24] = y4[i+96]; sumy[3] += yl[i+24];
        }

        device const uint8_t  * sc = (device const uint8_t  *)x[ib].scales + 8*im + is;
        device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * im + 4 * ir;
        device const half     * dh = &x[ib].d;

        for (int row = 0; row < N_DST; row++) {
            float4 acc1 = {0.f, 0.f, 0.f, 0.f};
            float4 acc2 = {0.f, 0.f, 0.f, 0.f};
            for (int i = 0; i < 8; i += 2) {
                acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
                acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
                acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
                acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
                acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
                acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
                acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
                acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
            }
            float dall = dh[0];
            float dmin = dh[1] * 1.f/16.f;
            sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
                                 (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f +
                                 (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f +
                                 (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) -
                         dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0));

            qs += step/2;
            sc += step;
            dh += step/2;
        }

        y4 += 4 * QK_K;
    }
#else
    const int ix = tiisg/2; // 0...15
    const int it = tiisg%2; // 0...1

    device const float * y4 = y + ix * QK_K + 8 * it;

    for (int ib = ix; ib < nb; ib += 16) {
        float4 sumy = {0.f, 0.f, 0.f, 0.f};
        for (int i = 0; i < 8; ++i) {
            yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
            yl[i+ 8] = y4[i+16]; sumy[1] += yl[i+ 8];
            yl[i+16] = y4[i+32]; sumy[2] += yl[i+16];
            yl[i+24] = y4[i+48]; sumy[3] += yl[i+24];
        }

        device const uint8_t  * sc = (device const uint8_t  *)x[ib].scales;
        device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it;
        device const half     * dh = &x[ib].d;

        for (int row = 0; row < N_DST; row++) {
            float4 acc1 = {0.f, 0.f, 0.f, 0.f};
            float4 acc2 = {0.f, 0.f, 0.f, 0.f};
            for (int i = 0; i < 8; i += 2) {
                acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
                acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
                acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
                acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
                acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
                acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
                acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
                acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
            }
            float dall = dh[0];
            float dmin = dh[1];
            sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
                                 (acc1[1] + 1.f/256.f * acc2[1]) * (sc[1] & 0xF) * 1.f/ 4.f +
                                 (acc1[2] + 1.f/256.f * acc2[2]) * (sc[2] & 0xF) * 1.f/16.f +
                                 (acc1[3] + 1.f/256.f * acc2[3]) * (sc[3] & 0xF) * 1.f/64.f) -
                         dmin * (sumy[0] * (sc[0] >> 4) + sumy[1] * (sc[1] >> 4) + sumy[2] * (sc[2] >> 4) + sumy[3] * (sc[3] >> 4));

            qs += step/2;
            sc += step;
            dh += step/2;
        }

        y4 += 16 * QK_K;
    }
#endif

    for (int row = 0; row < N_DST; ++row) {
        all_sum = simd_sum(sumf[row]);
        if (tiisg == 0) {
            dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = all_sum;
        }
    }
}
#if QK_K == 256
kernel void kernel_mul_mat_q3_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int64_t r2 = tgpig.z;

    const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2;
    const uint offset0 = r2/gqa*(nb*ne0);
    device const block_q3_K * x = (device const block_q3_K *) src0 + first_row*nb + offset0;
    device const float     * yy = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;

    float yl[16];

    const uint16_t kmask1 = 0x0303;
    const uint16_t kmask2 = 0x0f0f;

    const int tid = tiisg/2;
    const int ix  = tiisg%2;
    const int ip  = tid/8;        // 0 or 1
    const int il  = tid/2 - 4*ip; // 0...3
    const int ir  = tid%2;
    const int n   = 8;
    const int l0  = n*ir;

    const uint16_t m1 = 1 << (4*ip + il);
    const uint16_t m2 = m1 << 8;

    const int shift = 2*il;
    const uint16_t qm1 = 0x0003 << shift;
    const uint16_t qm2 = 0x0300 << shift;
    const int32_t v1 = 4 << shift;
    const int32_t v2 = 1024 << shift;

    const uint16_t s_shift1 = 4*ip;
    const uint16_t s_shift2 = s_shift1 + 2*(il/2);
    const int ik = 4 + (il%2);

    const int q_offset = 32*ip + l0;
    const int y_offset = 128*ip + 32*il + l0;

    const int step = sizeof(block_q3_K) * nb / 2;

    device const float * y1 = yy + ix*QK_K + y_offset;

    float sumf1[2] = {0.f}, sumf2[2] = {0.f};
    for (int i = ix; i < nb; i += 2) {
        for (int l = 0; l < 8; ++l) {
            yl[l+0] = y1[l+ 0];
            yl[l+8] = y1[l+16];
        }

        device const uint16_t * q  = (device const uint16_t *)(x[i].qs + q_offset);
        device const uint16_t * h  = (device const uint16_t *)(x[i].hmask + l0);
        device const uint16_t * a  = (device const uint16_t *)(x[i].scales);
        device const half     * dh = &x[i].d;

        for (int row = 0; row < 2; ++row) {
            const float d_all = (float)dh[0];
            const char2 scales = as_type<char2>((uint16_t)(((a[il] >> s_shift1) & kmask2) | (((a[ik] >> s_shift2) & kmask1) << 4)));

            float s1 = 0, s2 = 0;
            for (int l = 0; l < n; l += 2) {
                const uint16_t qs = q[l/2];
                s1 += yl[l+0] * ((int32_t)(qs & qm1) - ((h[l/2] & m1) ? 0 : v1));
                s2 += yl[l+1] * ((int32_t)(qs & qm2) - ((h[l/2] & m2) ? 0 : v2));
            }
            float d = d_all * (s1 + 1.f/256.f * s2);
            sumf1[row] += d * scales[0];
            sumf2[row] += d;

            s1 = s2 = 0;
            for (int l = 0; l < n; l += 2) {
                const uint16_t qs = q[l/2+8];
                s1 += yl[l+8] * ((int32_t)(qs & qm1) - ((h[l/2+8] & m1) ? 0 : v1));
                s2 += yl[l+9] * ((int32_t)(qs & qm2) - ((h[l/2+8] & m2) ? 0 : v2));
            }
            d = d_all * (s1 + 1.f/256.f * s2);
            sumf1[row] += d * scales[1];
            sumf2[row] += d;

            q  += step;
            h  += step;
            a  += step;
            dh += step;
        }

        y1 += 2 * QK_K;
    }

    for (int row = 0; row < 2; ++row) {
        const float sumf = (sumf1[row] - 32.f*sumf2[row]) / (1 << shift);
        const float tot = simd_sum(sumf);
        if (tiisg == 0) {
            dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = tot;
        }
    }
}
#else
kernel void kernel_mul_mat_q3_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int64_t r2 = tgpig.z;

    const int row = 2 * r0 + sgitg;
    const uint offset0 = r2/gqa*(nb*ne0);
    device const block_q3_K * x = (device const block_q3_K *) src0 + row*nb + offset0;
    device const float     * yy = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;

    const int ix = tiisg/4;
    const int il = 4 * (tiisg%4); // 0, 4, 8, 12
    const int im = il/8;          // 0, 0, 1, 1
    const int in = il%8;          // 0, 4, 0, 4

    float2 sum = {0.f, 0.f};

    for (int i = ix; i < nb; i += 8) {
        const float d_all = (float)(x[i].d);

        device const uint16_t * q = (device const uint16_t *)(x[i].qs + il);
        device const uint16_t * h = (device const uint16_t *)(x[i].hmask + in);
        device const uint16_t * s = (device const uint16_t *)(x[i].scales);
        device const float    * y = yy + i * QK_K + il;

        const float d1 = d_all * ((int32_t)(s[0] & 0x000F) - 8);
        const float d2 = d_all * ((int32_t)(s[0] & 0x00F0) - 128) * 1.f/64.f;
        const float d3 = d_all * ((int32_t)(s[0] & 0x0F00) - 2048) * 1.f/4096.f;
        const float d4 = d_all * ((int32_t)(s[0] & 0xF000) - 32768) * 1.f/262144.f;

        for (int l = 0; l < 4; l += 2) {
            const uint16_t hm = h[l/2] >> im;
            sum[0] += y[l+ 0] * d1 * ((int32_t)(q[l/2] & 0x0003) - ((hm & 0x0001) ? 0 :     4))
                    + y[l+16] * d2 * ((int32_t)(q[l/2] & 0x000c) - ((hm & 0x0004) ? 0 :    16))
                    + y[l+32] * d3 * ((int32_t)(q[l/2] & 0x0030) - ((hm & 0x0010) ? 0 :    64))
                    + y[l+48] * d4 * ((int32_t)(q[l/2] & 0x00c0) - ((hm & 0x0040) ? 0 :   256));
            sum[1] += y[l+ 1] * d1 * ((int32_t)(q[l/2] & 0x0300) - ((hm & 0x0100) ? 0 :  1024))
                    + y[l+17] * d2 * ((int32_t)(q[l/2] & 0x0c00) - ((hm & 0x0400) ? 0 :  4096))
                    + y[l+33] * d3 * ((int32_t)(q[l/2] & 0x3000) - ((hm & 0x1000) ? 0 : 16384))
                    + y[l+49] * d4 * ((int32_t)(q[l/2] & 0xc000) - ((hm & 0x4000) ? 0 : 65536));
        }
    }
    const float sumf = sum[0] + sum[1] * 1.f/256.f;

    const float tot = simd_sum(sumf);
    if (tiisg == 0) {
        dst[r1*ne0 + r2*ne0*ne1 + row] = tot;
    }
}
#endif
#if QK_K == 256
kernel void kernel_mul_mat_q4_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int ix = tiisg/8; // 0...3
    const int it = tiisg%8; // 0...7
    const int im = it/4;    // 0 or 1
    const int ir = it%4;    // 0...3

    const int nb = ne00/QK_K;
    const int r0 = tgpig.x;
    const int r1 = tgpig.y;
    const int r2 = tgpig.z;
    const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
    const int ib_row = first_row * nb;
    const uint offset0 = r2/gqa*(nb*ne0);
    device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0;
    device const float      * y = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;
    float yl[16];
    float yh[16];
    float sumf[N_DST] = {0.f}, all_sum;

    const int step = sizeof(block_q4_K) * nb / 2;

    device const float * y4 = y + ix * QK_K + 64 * im + 8 * ir;

    uint16_t sc16[4];
    thread const uint8_t * sc8 = (thread const uint8_t *)sc16;

    for (int ib = ix; ib < nb; ib += 4) {
        float4 sumy = {0.f, 0.f, 0.f, 0.f};
        for (int i = 0; i < 8; ++i) {
            yl[i+0] = y4[i+  0]; sumy[0] += yl[i+0];
            yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8];
            yh[i+0] = y4[i+128]; sumy[2] += yh[i+0];
            yh[i+8] = y4[i+160]; sumy[3] += yh[i+8];
        }

        device const uint16_t * sc = (device const uint16_t *)x[ib].scales + im;
        device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * im + 4 * ir;
        device const half     * dh = &x[ib].d;

        for (int row = 0; row < N_DST; row++) {
            sc16[0] = sc[0] & kmask1;
            sc16[1] = sc[2] & kmask1;
            sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2);
            sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2);

            device const uint16_t * q2 = q1 + 32;

            float4 acc1 = {0.f, 0.f, 0.f, 0.f};
            float4 acc2 = {0.f, 0.f, 0.f, 0.f};
            for (int i = 0; i < 8; i += 2) {
                acc1[0] += yl[i+0] * (q1[i/2] & 0x000F);
                acc1[1] += yl[i+1] * (q1[i/2] & 0x0F00);
                acc1[2] += yl[i+8] * (q1[i/2] & 0x00F0);
                acc1[3] += yl[i+9] * (q1[i/2] & 0xF000);
                acc2[0] += yh[i+0] * (q2[i/2] & 0x000F);
                acc2[1] += yh[i+1] * (q2[i/2] & 0x0F00);
                acc2[2] += yh[i+8] * (q2[i/2] & 0x00F0);
                acc2[3] += yh[i+9] * (q2[i/2] & 0xF000);
            }

            float dall = dh[0];
            float dmin = dh[1];
            sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] +
                                 (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f +
                                 (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] +
                                 (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) -
                         dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);

            q1 += step;
            sc += step;
            dh += step;
        }

        y4 += 4 * QK_K;
    }

    for (int row = 0; row < N_DST; ++row) {
        all_sum = simd_sum(sumf[row]);
        if (tiisg == 0) {
            dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = all_sum;
        }
    }
}
#else
kernel void kernel_mul_mat_q4_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant      uint & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {
    const int ix = tiisg/4; // 0...7
    const int it = tiisg%4; // 0...3

    const int nb = ne00/QK_K;
    const int r0 = tgpig.x;
    const int r1 = tgpig.y;
    const int r2 = tgpig.z;
    const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
    const int ib_row = first_row * nb;
    const uint offset0 = r2/gqa*(nb*ne0);
    device const block_q4_K * x = (device const block_q4_K *) src0 + ib_row + offset0;
    device const float      * y = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;
    float yl[8];
    float yh[8];
    float sumf[N_DST] = {0.f}, all_sum;

    const int step = sizeof(block_q4_K) * nb / 2;

    device const float * y4 = y + ix * QK_K + 8 * it;

    uint16_t sc16[4];

    for (int ib = ix; ib < nb; ib += 8) {
        float2 sumy = {0.f, 0.f};
        for (int i = 0; i < 8; ++i) {
            yl[i] = y4[i+ 0]; sumy[0] += yl[i];
            yh[i] = y4[i+32]; sumy[1] += yh[i];
        }

        device const uint16_t * sc = (device const uint16_t *)x[ib].scales;
        device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 4 * it;
        device const half     * dh = x[ib].d;

        for (int row = 0; row < N_DST; row++) {
            sc16[0] = sc[0] & 0x000f;
            sc16[1] = sc[0] & 0x0f00;
            sc16[2] = sc[0] & 0x00f0;
            sc16[3] = sc[0] & 0xf000;

            float2 acc1 = {0.f, 0.f};
            float2 acc2 = {0.f, 0.f};
            for (int i = 0; i < 8; i += 2) {
                acc1[0] += yl[i+0] * (qs[i/2] & 0x000F);
                acc1[1] += yl[i+1] * (qs[i/2] & 0x0F00);
                acc2[0] += yh[i+0] * (qs[i/2] & 0x00F0);
                acc2[1] += yh[i+1] * (qs[i/2] & 0xF000);
            }

            float dall = dh[0];
            float dmin = dh[1];
            sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc1[1]) * sc16[0] +
                                 (acc2[0] + 1.f/256.f * acc2[1]) * sc16[1] * 1.f/4096.f) -
                         dmin * 1.f/16.f * (sumy[0] * sc16[2] + sumy[1] * sc16[3] * 1.f/256.f);

            qs += step;
            sc += step;
            dh += step;
        }

        y4 += 8 * QK_K;
    }

    for (int row = 0; row < N_DST; ++row) {
        all_sum = simd_sum(sumf[row]);
        if (tiisg == 0) {
            dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = all_sum;
        }
    }
}
#endif
kernel void kernel_mul_mat_q5_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant   uint    & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int     r2 = tgpig.z;

    const int first_row = (r0 * N_SIMDGROUP + sgitg) * 2;
    const uint offset0 = r2/gqa*(nb*ne0);

    device const block_q5_K * x  = (device const block_q5_K *) src0 + first_row*nb + offset0;
    device const float      * yy = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;

    float sumf[2]={0.f};

    const int step = sizeof(block_q5_K) * nb;

#if QK_K == 256
    float yl[16], yh[16];

    const uint16_t kmask1 = 0x3f3f;
    const uint16_t kmask2 = 0x0f0f;
    const uint16_t kmask3 = 0xc0c0;

    const int tid = tiisg/4;
    const int ix  = tiisg%4;
    const int im  = tid/4;
    const int ir  = tid%4;
    const int n   = 8;

    const int l0 = n*ir;
    const int q_offset = 32*im + l0;
    const int y_offset = 64*im + l0;

    const uint8_t hm1 = 1u << (2*im);
    const uint8_t hm2 = hm1 << 1;
    const uint8_t hm3 = hm1 << 4;
    const uint8_t hm4 = hm2 << 4;
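
    // hm1..hm4 select the qh bits that extend the low and high nibbles of q1
    // and q2 to 5 bits; in the inner loop the set bit contributes +16, or +256
    // for the high nibble, which is kept unshifted (i.e. x16)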

    uint16_t sc16[4];
    thread const uint8_t * sc8 = (thread const uint8_t *)sc16;

    device const float * y1 = yy + ix*QK_K + y_offset;

    for (int i = ix; i < nb; i += 4) {
        device const uint8_t  * q1 = x[i].qs + q_offset;
        device const uint8_t  * qh = x[i].qh + l0;
        device const half     * dh = &x[i].d;
        device const uint16_t * a  = (device const uint16_t *)x[i].scales + im;

        device const float * y2 = y1 + 128;
        float4 sumy = {0.f, 0.f, 0.f, 0.f};
        for (int l = 0; l < 8; ++l) {
            yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0];
            yl[l+8] = y1[l+32]; sumy[1] += yl[l+8];
            yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0];
            yh[l+8] = y2[l+32]; sumy[3] += yh[l+8];
        }

        for (int row = 0; row < 2; ++row) {
            device const uint8_t * q2 = q1 + 64;

            sc16[0] = a[0] & kmask1;
            sc16[1] = a[2] & kmask1;
            sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2);
            sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2);

            float4 acc = {0.f, 0.f, 0.f, 0.f};
            for (int l = 0; l < n; ++l) {
                uint8_t h = qh[l];
                acc[0] += yl[l+0] * ((uint16_t)(q1[l] & 0x0F) + (h & hm1 ? 16 : 0));
                acc[1] += yl[l+8] * ((uint16_t)(q1[l] & 0xF0) + (h & hm2 ? 256 : 0));
                acc[2] += yh[l+0] * ((uint16_t)(q2[l] & 0x0F) + (h & hm3 ? 16 : 0));
                acc[3] += yh[l+8] * ((uint16_t)(q2[l] & 0xF0) + (h & hm4 ? 256 : 0));
            }
            const float dall = dh[0];
            const float dmin = dh[1];
            sumf[row] += dall * (acc[0] * sc8[0] + acc[1] * sc8[1] * 1.f/16.f + acc[2] * sc8[4] + acc[3] * sc8[5] * 1.f/16.f) -
                         dmin * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);

            q1 += step;
            qh += step;
            dh += step/2;
            a  += step/2;
        }

        y1 += 4 * QK_K;
    }

#else
    float yl[8], yh[8];

    const int il = 4 * (tiisg/8);  // 0, 4, 8, 12
    const int ix = tiisg%8;
    const int im = il/8;           // 0, 0, 1, 1
    const int in = il%8;           // 0, 4, 0, 4

    device const float * y = yy + ix*QK_K + il;

    for (int i = ix; i < nb; i += 8) {
        for (int l = 0; l < 4; ++l) {
            yl[l+0] = y[l+ 0];
            yl[l+4] = y[l+16];
            yh[l+0] = y[l+32];
            yh[l+4] = y[l+48];
        }

        device const half    * dh = &x[i].d;
        device const uint8_t * q  = x[i].qs + il;
        device const uint8_t * h  = x[i].qh + in;
        device const int8_t  * s  = x[i].scales;

        for (int row = 0; row < 2; ++row) {
            const float d = dh[0];

            float2 acc = {0.f, 0.f};
            for (int l = 0; l < 4; ++l) {
                const uint8_t hl = h[l] >> im;
                acc[0] += yl[l+0] * s[0] * ((int16_t)(q[l+ 0] & 0x0F) - (hl & 0x01 ? 0 : 16))
                        + yl[l+4] * s[1] * ((int16_t)(q[l+16] & 0x0F) - (hl & 0x04 ? 0 : 16));
                acc[1] += yh[l+0] * s[2] * ((int16_t)(q[l+ 0] & 0xF0) - (hl & 0x10 ? 0 : 256))
                        + yh[l+4] * s[3] * ((int16_t)(q[l+16] & 0xF0) - (hl & 0x40 ? 0 : 256));
            }
            sumf[row] += d * (acc[0] + 1.f/16.f * acc[1]);

            q  += step;
            h  += step;
            s  += step;
            dh += step/2;
        }

        y += 8 * QK_K;
    }
#endif

    for (int row = 0; row < 2; ++row) {
        const float tot = simd_sum(sumf[row]);
        if (tiisg == 0) {
            dst[r1*ne0 + r2*ne0*ne1 + first_row + row] = tot;
        }
    }
}
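
// unlike the q4_K/q5_K kernels above, each simdgroup here works on a single
// row (row = 2*r0 + sgitg) and keeps one scalar accumulator per thread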
kernel void kernel_mul_mat_q6_K_f32(
        device const  void * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne01[[buffer(4)]],
        constant   int64_t & ne02[[buffer(5)]],
        constant   int64_t & ne10[[buffer(9)]],
        constant   int64_t & ne12[[buffer(11)]],
        constant   int64_t & ne0[[buffer(15)]],
        constant   int64_t & ne1[[buffer(16)]],
        constant   uint    & gqa[[buffer(17)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiisg[[thread_index_in_simdgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {

    const uint8_t kmask1 = 0x03;
    const uint8_t kmask2 = 0x0C;
    const uint8_t kmask3 = 0x30;
    const uint8_t kmask4 = 0xC0;

    const int nb = ne00/QK_K;

    const int64_t r0 = tgpig.x;
    const int64_t r1 = tgpig.y;
    const int     r2 = tgpig.z;

    const int row = 2 * r0 + sgitg;
    const uint offset0 = r2/gqa*(nb*ne0);

    device const block_q6_K * x  = (device const block_q6_K *) src0 + row * nb + offset0;
    device const float      * yy = (device const float      *) src1 + r1*ne10 + r2*ne00*ne1;

    float sumf = 0;

#if QK_K == 256
    const int tid = tiisg/2;
    const int ix  = tiisg%2;
    const int ip  = tid/8;  // 0 or 1
    const int il  = tid%8;
    const int n   = 4;
    const int l0  = n*il;
    const int is  = 8*ip + l0/16;

    const int y_offset   = 128*ip + l0;
    const int q_offset_l =  64*ip + l0;
    const int q_offset_h =  32*ip + l0;

    for (int i = ix; i < nb; i += 2) {
        device const uint8_t * q1 = x[i].ql + q_offset_l;
        device const uint8_t * q2 = q1 + 32;
        device const uint8_t * qh = x[i].qh + q_offset_h;
        device const int8_t  * sc = x[i].scales + is;

        device const float * y = yy + i * QK_K + y_offset;

        const float dall = x[i].d;

        float4 sums = {0.f, 0.f, 0.f, 0.f};
        for (int l = 0; l < n; ++l) {
            sums[0] += y[l+ 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
            sums[1] += y[l+32] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
            sums[2] += y[l+64] * ((int8_t)((q1[l]  >> 4) | ((qh[l] & kmask3) << 0)) - 32);
            sums[3] += y[l+96] * ((int8_t)((q2[l]  >> 4) | ((qh[l] & kmask4) >> 2)) - 32);
        }

        sumf += dall * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]);
    }
#else
    const int ix = tiisg/4;
    const int il = 4*(tiisg%4);

    for (int i = ix; i < nb; i += 8) {
        device const float   * y  = yy + i * QK_K + il;
        device const uint8_t * ql = x[i].ql + il;
        device const uint8_t * qh = x[i].qh + il;
        device const int8_t  * s  = x[i].scales;

        const float d = x[i].d;

        float4 sums = {0.f, 0.f, 0.f, 0.f};
        for (int l = 0; l < 4; ++l) {
            sums[0] += y[l+ 0] * ((int8_t)((ql[l+ 0] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
            sums[1] += y[l+16] * ((int8_t)((ql[l+16] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
            sums[2] += y[l+32] * ((int8_t)((ql[l+ 0]  >> 4) | ((qh[l] & kmask3) >> 0)) - 32);
            sums[3] += y[l+48] * ((int8_t)((ql[l+16]  >> 4) | ((qh[l] & kmask4) >> 2)) - 32);
        }
        sumf += d * (sums[0] * s[0] + sums[1] * s[1] + sums[2] * s[2] + sums[3] * s[3]);
    }
#endif

    const float tot = simd_sum(sumf);
    if (tiisg == 0) {
        dst[r1*ne0 + r2*ne0*ne1 + row] = tot;
    }
}

//============================= templates and their specializations =============================
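
// each dequantize_* function below expands one 16-weight chunk of a block into
// a thread-local 4x4 matrix; il selects the chunk within the block, so a
// block_q holding 16*nl weights is covered by nl calls with il = 0..nl-1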
template <typename type4x4>
void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) {
    half4x4 temp = *(((device half4x4 *)src));
    for (int i = 0; i < 16; i++){
        reg[i/4][i%4] = temp[i/4][i%4];
    }
}
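
// q4_0/q4_1 pick the 4-bit weights out with unshifted masks: for the upper
// half of the block (il != 0) the high nibble stays in place and the scale is
// divided by 16 instead (q4_0 also pre-scales its -8 offset), saving a shift
// per weight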
template <typename type4x4>
void dequantize_q4_0(device const block_q4_0 *xb, short il, thread type4x4 & reg) {
    device const uint16_t * qs = ((device const uint16_t *)xb + 1);
    const half d = il ? (xb->d / 16.h) : xb->d;
    const half m = il ? ( -8.h * 16.h) : -8.h;
    const ushort mask0 = il ? 0x00F0 : 0x000F;
    const ushort mask1 = il ? 0xF000 : 0x0F00;

    for (int i = 0; i < 8; i++) {
        reg[i/2][2*(i%2)]   = (((qs[i] & mask0)     ) + m) * d;
        reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) + m) * d;
    }
}

template <typename type4x4>
void dequantize_q4_1(device const block_q4_1 *xb, short il, thread type4x4 & reg) {
    device const uint16_t * qs = ((device const uint16_t *)xb + 2);
    const half d = il ? (xb->d / 16.h) : xb->d;
    const half m = xb->m;
    const ushort mask0 = il ? 0x00F0 : 0x000F;
    const ushort mask1 = il ? 0xF000 : 0x0F00;

    for (int i = 0; i < 8; i++) {
        reg[i/2][2*(i%2)]   = (((qs[i] & mask0)     ) * d) + m;
        reg[i/2][2*(i%2)+1] = (((qs[i] & mask1) >> 8) * d) + m;
    }
}

template <typename type4x4>
void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) {
    device const int8_t * qs = ((device const int8_t *)xb->qs);
    const half d = xb->d;

    for (int i = 0; i < 16; i++) {
        reg[i/4][i%4] = (qs[i + 16*il] * d);
    }
}
template <typename type4x4>
void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) {
    const half d = xb->d;
    const half min = xb->dmin;
    device const uint8_t * q = (device const uint8_t *)xb->qs;
    half dl, ml;
    uint8_t sc = xb->scales[il];

#if QK_K == 256
    q = q + 32*(il/8) + 16*(il&1);
    il = (il/2)%4;
#endif
    half  coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
    uchar mask = il>1 ? (il>2 ? 192    : 48)     : (il>0 ? 12    : 3);
    dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4);
    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = dl * (q[i] & mask) - ml;
    }
}

template <typename type4x4>
void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) {
    const float d_all = (float)(xb->d);
    device const uint8_t * q = (device const uint8_t *)xb->qs;
    device const uint8_t * h = (device const uint8_t *)xb->hmask;
    device const int8_t * scales = (device const int8_t *)xb->scales;

#if QK_K == 256
    q = q + 32 * (il/8) + 16 * (il&1);
    h = h + 16 * (il&1);
    uint8_t m = 1 << (il/2);
    uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48)
                               : ((il/4)>0 ? 12  : 3);
    uint16_t kmask2 = il/8 ? 0xF0 : 0x0F;
    uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4];
    int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2)
                              : (scale_2&kmask2) | ((scale_1&kmask1) << 4);
    float dl = il<8 ? d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f);

    il = (il/2)%4;
    float   coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
    uint8_t mask = il>1 ? (il>2 ? 192    : 48)     : (il>0 ? 12    : 3);

    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i] & m) ? 0 : 4.f/coef));
    }
#else
    float    kcoef = il&1 ? 1.f/16.f : 1.f;
    uint16_t kmask = il&1 ? 0xF0     : 0x0F;
    float dl = d_all * ((scales[il/2] & kmask) * kcoef - 8);
    float   coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
    uint8_t mask = il>1 ? (il>2 ? 192    : 48)     : (il>0 ? 12    : 3);
    uint8_t m = 1<<(il*2);
    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = coef * dl * ((q[i] & mask) - ((h[i%8] & (m * (1 + i/8))) ? 0 : 4.f/coef));
    }
#endif
}
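
// for QK_K == 256, q4_K/q5_K pack eight 6-bit (scale, min) pairs into the
// 12-byte scales array; get_scale_min_k4 unpacks the pair selected by is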
template <typename type4x4>
void dequantize_q4_K(device const block_q4_K *xb, short il, thread type4x4 & reg) {
    device const uint8_t * q = xb->qs;

#if QK_K == 256
    const float d   = (float)(xb->d);
    const float min = (float)(xb->dmin);
    short is = (il/4) * 2;
    q = q + (il/4) * 32 + 16 * (il&1);
    il = il%4;
    const uchar4 sc = get_scale_min_k4(is, xb->scales);
    const float dl = il<2 ? d * sc[0]   : d * sc[2]/16.h;
    const float ml = il<2 ? min * sc[1] : min * sc[3];
#else
    q = q + 16 * (il&1);
    device const uint8_t * s  = xb->scales;
    device const half2   * dh = (device const half2 *)xb->d;
    const float2 d = (float2)dh[0];
    const float dl = il<2 ? d[0] * (s[0]&0xF) : d[0] * (s[1]&0xF)/16.h;
    const float ml = il<2 ? d[1] * (s[0]>>4)  : d[1] * (s[1]>>4);
#endif
    const ushort mask = il<2 ? 0x0F : 0xF0;
    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = dl * (q[i] & mask) - ml;
    }
}

template <typename type4x4>
void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) {
    device const uint8_t * q  = xb->qs;
    device const uint8_t * qh = xb->qh;

#if QK_K == 256
    const float d   = (float)(xb->d);
    const float min = (float)(xb->dmin);
    short is = (il/4) * 2;
    q  = q  + 32 * (il/4) + 16 * (il&1);
    qh = qh + 16 * (il&1);
    uint8_t ul = 1 << (il/2);
    il = il%4;
    const uchar4 sc = get_scale_min_k4(is, xb->scales);
    const float dl = il<2 ? d * sc[0]   : d * sc[2]/16.h;
    const float ml = il<2 ? min * sc[1] : min * sc[3];

    const ushort mask   = il<2 ? 0x0F : 0xF0;
    const float  qh_val = il<2 ? 16.f : 256.f;
    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml;
    }
#else
    q = q + 16 * (il&1);
    device const int8_t * s = xb->scales;
    const float dl = xb->d * s[il];
    uint8_t m = 1<<(il*2);
    const float  coef = il<2 ? 1.f  : 1.f/16.f;
    const ushort mask = il<2 ? 0x0F : 0xF0;
    for (int i = 0; i < 16; ++i) {
        reg[i/4][i%4] = coef * dl * ((q[i] & mask) - (qh[i%8] & (m*(1+i/8)) ? 0.f : 16.f/coef));
    }
#endif
}

template <typename type4x4>
void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) {
    const float d_all = (float)(xb->d);
    device const uint8_t * ql = (device const uint8_t *)xb->ql;
    device const uint8_t * qh = (device const uint8_t *)xb->qh;
    device const int8_t  * scales = (device const int8_t *)xb->scales;

#if QK_K == 256
    ql = ql + 64*(il/8) + 32*((il/2)&1) + 16*(il&1);
    qh = qh + 32*(il/8) + 16*(il&1);
    float sc = scales[(il%2) + 2 * ((il/2))];
    il = (il/2)%4;
#else
    ql = ql + 16 * (il&1);
    float sc = scales[il];
#endif
    for (int i = 0; i < 16; ++i) {
        uint16_t kmask1 = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
        uint16_t kmask2 = il>1 ? 0xF0              : 0x0F;
        const float coef = il>1 ? 1.f/16.f : 1.f;
        float q = il&1 ? ((ql[i]&kmask2)|((qh[i]&kmask1)<<2)) - 32.f/coef
                       : ((ql[i]&kmask2)|((qh[i]&kmask1)<<4)) - 32.f/coef;
        reg[i/4][i%4] = d_all * sc * q * coef;
    }
}
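
// kernel_get_rows: one threadgroup per looked-up row; src1 holds the row
// indices and each thread expands 16 weights at a time into dst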
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread float4x4 &)>
kernel void kernel_get_rows(
        device const  void * src0,
        device const   int * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant  uint64_t & nb01,
        constant  uint64_t & nb1,
        uint tgpig[[threadgroup_position_in_grid]],
        uint tiitg[[thread_index_in_threadgroup]],
        uint tptg[[threads_per_threadgroup]]) {
    const int i = tgpig;
    const int r = ((device int32_t *) src1)[i];

    for (int ind = tiitg; ind < ne00/16; ind += tptg) {
        float4x4 temp;
        dequantize_func(
            ((device const block_q *) ((device char *) src0 + r*nb01)) + ind/nl, ind%nl, temp);
        *(((device float4x4 *) ((device char *) dst + i*nb1)) + ind) = temp;
    }
}

#define BLOCK_SIZE_M 64  // 8 simdgroup matrices from matrix A
#define BLOCK_SIZE_N 32  // 4 simdgroup matrices from matrix B
#define BLOCK_SIZE_K 32
#define THREAD_MAT_M 4   // each simdgroup works on 4 simdgroup matrices from matrix A
#define THREAD_MAT_N 2   // each simdgroup works on 2 simdgroup matrices from matrix B
#define THREAD_PER_BLOCK 128
#define THREAD_PER_ROW 2 // 2 threads for each row in matrix A to load numbers
#define THREAD_PER_COL 4 // 4 threads for each row in matrix B to load numbers
#define SG_MAT_SIZE 64   // simdgroup matrix is of shape 8x8
#define SG_MAT_ROW 8
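
// resulting tiling: each threadgroup of THREAD_PER_BLOCK = 128 threads
// (4 simdgroups in a 2x2 arrangement) computes a 64x32 tile of C, walking the
// K dimension in slices of BLOCK_SIZE_K = 32; every simdgroup owns a 32x16
// sub-tile built from 4x2 simdgroup matrices of shape 8x8
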
// each block_q contains 16*nl weights
template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread half4x4 &)>
kernel void kernel_mul_mm(
        device const uchar * src0,
        device const float * src1,
        device       float * dst,
        constant   int64_t & ne00,
        constant   int64_t & ne02,
        constant   int64_t & nb01,
        constant   int64_t & nb02,
        constant   int64_t & ne12,
        constant   int64_t & ne0,
        constant   int64_t & ne1,
        constant      uint & gqa,
        threadgroup uchar  * shared_memory [[threadgroup(0)]],
        uint3 tgpig[[threadgroup_position_in_grid]],
        uint  tiitg[[thread_index_in_threadgroup]],
        uint  sgitg[[simdgroup_index_in_threadgroup]]) {

    threadgroup half  * sa = ((threadgroup half  *)shared_memory);
    threadgroup float * sb = (threadgroup float *)(shared_memory + 4096);

    const uint r0 = tgpig.y;
    const uint r1 = tgpig.x;
    const uint im = tgpig.z;

    // if this block is of 64x32 shape or smaller
    short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
    short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;

    // a thread shouldn't load data outside of the matrix
    short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
    short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;

    simdgroup_half8x8  ma[4];
    simdgroup_float8x8 mb[2];
    simdgroup_float8x8 c_res[8];
    for (int i = 0; i < 8; i++){
        c_res[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
    }

    short il = (tiitg % THREAD_PER_ROW);

    uint   offset0 = im/gqa*nb02;
    ushort offset1 = il/nl;

    device const block_q * x = (device const block_q *)(src0 + (r0 * BLOCK_SIZE_M + thread_row) * nb01 + offset0) + offset1;
    device const float   * y = src1 + (r1 * BLOCK_SIZE_N + thread_col) * ne00
                             + BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL) + im * ne00 * ne1;

    for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
        // load data and store to threadgroup memory
        half4x4 temp_a;
        dequantize_func(x, il, temp_a);
        threadgroup_barrier(mem_flags::mem_threadgroup);
        #pragma unroll(16)
        for (int i = 0; i < 16; i++) {
            *(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8)
                    + 16 * (tiitg % THREAD_PER_ROW) + 8 * (i / 8))
                    + (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
        }
        *(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL))
                = *((device float2x4 *)y);

        il = (il + 2 < nl) ? il + 2 : il % 2;
        x  = (il < 2) ? x + (2+nl-1)/nl : x;
        y += BLOCK_SIZE_K;

        threadgroup_barrier(mem_flags::mem_threadgroup);
        // load matrices from threadgroup memory and conduct outer products
        threadgroup half  * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
        threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
        #pragma unroll(4)
        for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
            #pragma unroll(4)
            for (int i = 0; i < 4; i++) {
                simdgroup_load(ma[i], lsma + SG_MAT_SIZE * i);
            }
            simdgroup_barrier(mem_flags::mem_none);
            #pragma unroll(2)
            for (int i = 0; i < 2; i++) {
                simdgroup_load(mb[i], lsmb + SG_MAT_SIZE * i);
            }
            lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
            lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
            #pragma unroll(8)
            for (int i = 0; i < 8; i++){
                simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
            }
        }
    }
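
    // write back the results: full 64x32 tiles go straight to dst, while
    // partial edge tiles are staged through threadgroup memory so that no
    // lane writes outside the matrix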
    if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) {
        device float * C = dst + BLOCK_SIZE_M * r0 + 32 * (sgitg &  1)
                         + (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0;
        for (int i = 0; i < 8; i++) {
            simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0);
        }
    } else {
        // block is smaller than 64x32, we should avoid writing data outside of the matrix
        threadgroup_barrier(mem_flags::mem_threadgroup);
        threadgroup float * temp_str = ((threadgroup float *)shared_memory)
                                     + 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
        for (int i = 0; i < 8; i++) {
            simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
        }

        threadgroup_barrier(mem_flags::mem_threadgroup);

        device float * C = dst + BLOCK_SIZE_M * r0 + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
        if (sgitg == 0) {
            for (int i = 0; i < n_rows; i++) {
                for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
                    *(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
                }
            }
        }
    }
}

#if QK_K == 256
#define QK_NL 16
#else
#define QK_NL 4
#endif
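
// QK_NL is the number of 16-weight chunks per block (QK_K/16), i.e. the nl
// template argument used for the k-quant specializations below
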
typedef void (get_rows_t)(device const void *, device const int *, device float *, constant int64_t &,
                          constant uint64_t &, constant uint64_t &, uint, uint, uint);

template [[host_name("kernel_get_rows_f16")]]  kernel get_rows_t kernel_get_rows<half4x4,    1, dequantize_f16>;
template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_t kernel_get_rows<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_t kernel_get_rows<block_q4_1, 2, dequantize_q4_1>;
template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_t kernel_get_rows<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_t kernel_get_rows<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_t kernel_get_rows<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_t kernel_get_rows<block_q4_K, QK_NL, dequantize_q4_K>;
template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_t kernel_get_rows<block_q5_K, QK_NL, dequantize_q5_K>;
template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_t kernel_get_rows<block_q6_K, QK_NL, dequantize_q6_K>;

typedef void (mat_mm_t)(device const uchar *, device const float *, device float *, constant int64_t &,
                        constant int64_t &, constant int64_t &, constant int64_t &, constant int64_t &,
                        constant int64_t &, constant int64_t &, constant uint &, threadgroup uchar *, uint3, uint, uint);

template [[host_name("kernel_mul_mm_f16_f32")]]  kernel mat_mm_t kernel_mul_mm<half4x4,    1, dequantize_f16>;
template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_0, 2, dequantize_q4_0>;
template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_1, 2, dequantize_q4_1>;
template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mat_mm_t kernel_mul_mm<block_q8_0, 2, dequantize_q8_0>;
template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q2_K, QK_NL, dequantize_q2_K>;
template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q3_K, QK_NL, dequantize_q3_K>;
template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q4_K, QK_NL, dequantize_q4_K>;
template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q5_K, QK_NL, dequantize_q5_K>;
template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mat_mm_t kernel_mul_mm<block_q6_K, QK_NL, dequantize_q6_K>;