// ggml-metal.m
#import "ggml-metal.h"
#import "ggml-backend-impl.h"
#import "ggml.h"

#import <Foundation/Foundation.h>
#import <Metal/Metal.h>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#ifdef GGML_METAL_NDEBUG
#define GGML_METAL_LOG_INFO(...)
#define GGML_METAL_LOG_WARN(...)
#define GGML_METAL_LOG_ERROR(...)
#else
#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

struct ggml_metal_kernel {
    id<MTLComputePipelineState> pipeline;
};
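
// Each compute kernel in ggml-metal.metal has one entry in the enum below; the
// corresponding pipeline objects are created in ggml_metal_init() and stored in
// ggml_metal_context.kernels, indexed by these enum values.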
enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_ADD,
    GGML_METAL_KERNEL_TYPE_ADD_ROW,
    GGML_METAL_KERNEL_TYPE_MUL,
    GGML_METAL_KERNEL_TYPE_MUL_ROW,
    GGML_METAL_KERNEL_TYPE_DIV,
    GGML_METAL_KERNEL_TYPE_DIV_ROW,
    GGML_METAL_KERNEL_TYPE_SCALE,
    GGML_METAL_KERNEL_TYPE_SCALE_4,
    GGML_METAL_KERNEL_TYPE_CLAMP,
    GGML_METAL_KERNEL_TYPE_TANH,
    GGML_METAL_KERNEL_TYPE_RELU,
    GGML_METAL_KERNEL_TYPE_GELU,
    GGML_METAL_KERNEL_TYPE_GELU_4,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK_4,
    GGML_METAL_KERNEL_TYPE_SILU,
    GGML_METAL_KERNEL_TYPE_SILU_4,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX_4,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,
    GGML_METAL_KERNEL_TYPE_RMS_NORM,
    GGML_METAL_KERNEL_TYPE_GROUP_NORM,
    GGML_METAL_KERNEL_TYPE_NORM,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F16,
    GGML_METAL_KERNEL_TYPE_ALIBI_F32,
    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
    GGML_METAL_KERNEL_TYPE_PAD_F32,
    GGML_METAL_KERNEL_TYPE_ARANGE_F32,
    GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
    GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,
    GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F32,
    GGML_METAL_KERNEL_TYPE_CONCAT,
    GGML_METAL_KERNEL_TYPE_SQR,
    GGML_METAL_KERNEL_TYPE_SUM_ROWS,

    GGML_METAL_KERNEL_TYPE_COUNT
};

struct ggml_metal_context {
    int n_cb;

    id<MTLDevice> device;
    id<MTLCommandQueue> queue;

    dispatch_queue_t d_queue;

    struct ggml_metal_kernel kernels[GGML_METAL_KERNEL_TYPE_COUNT];

    bool support_simdgroup_reduction;
    bool support_simdgroup_mm;

    bool should_capture_next_compute;
};

// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
// static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end

static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
    fprintf(stderr, "%s", msg);

    UNUSED(level);
    UNUSED(user_data);
}

ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback;
void * ggml_metal_log_user_data = NULL;

GGML_ATTRIBUTE_FORMAT(2, 3)
static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
    if (ggml_metal_log_callback != NULL) {
        va_list args;
        va_start(args, format);
        char buffer[128];
        int len = vsnprintf(buffer, 128, format, args);
        if (len < 128) {
            ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
        } else {
            char * buffer2 = malloc(len + 1);
            va_end(args);
            va_start(args, format);
            vsnprintf(buffer2, len + 1, format, args);
            buffer2[len] = 0;
            ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
            free(buffer2);
        }
        va_end(args);
    }
}
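
// Illustrative sketch (not part of the code above): a host application could route the
// backend's log output into its own sink by pointing the two globals above at a
// callback with the same signature, e.g.:
//
//     static void my_metal_log(enum ggml_log_level level, const char * msg, void * user_data) {
//         fprintf((FILE *) user_data, "[metal] %s", msg);
//     }
//
//     ggml_metal_log_callback  = my_metal_log;
//     ggml_metal_log_user_data = stderr;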

static void * ggml_metal_host_malloc(size_t n) {
    void * data = NULL;
    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
    if (result != 0) {
        GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
        return NULL;
    }

    return data;
}
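
// Note: the page alignment from posix_memalign matters because Metal's
// newBufferWithBytesNoCopy: (used elsewhere in this backend to wrap host memory in
// MTLBuffer objects) requires the base pointer to be page-aligned.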

static struct ggml_metal_context * ggml_metal_init(int n_cb) {
    GGML_METAL_LOG_INFO("%s: allocating\n", __func__);

#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (id<MTLDevice> device in devices) {
        GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
    }
    [devices release]; // since it was created by a *Copy* C method
#endif

    // Pick and show default Metal device
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);

    // Configure context
    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
    ctx->device = device;
    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
    ctx->queue = [ctx->device newCommandQueue];
    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);

    id<MTLLibrary> metal_library;

    // load library
    //
    // - first check if the library is embedded
    // - then check if the library is in the bundle
    // - if not found, load the source and compile it
    // - if that fails, return NULL
    {
        NSBundle * bundle = nil;
#ifdef SWIFT_PACKAGE
        bundle = SWIFTPM_MODULE_BUNDLE;
#else
        bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
#endif

        NSError * error = nil;

#if GGML_METAL_EMBED_LIBRARY
        const bool try_metallib = false;
#else
        const bool try_metallib = true;
#endif

        NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"];
        if (try_metallib && path_lib != nil) {
            // pre-compiled library found
            NSURL * libURL = [NSURL fileURLWithPath:path_lib];
            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]);

            metal_library = [ctx->device newLibraryWithURL:libURL error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
        } else {
#if GGML_METAL_EMBED_LIBRARY
            GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);

            extern const char ggml_metallib_start[];
            extern const char ggml_metallib_end[];

            NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
#else
            GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);

            NSString * path_source;
            NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];

            GGML_METAL_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil");

            if (path_resource) {
                path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"];
            } else {
                path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
            }

            if (path_source == nil) {
                GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
                path_source = @"ggml-metal.metal";
            }

            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]);

            NSString * src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
#endif // GGML_METAL_EMBED_LIBRARY

            @autoreleasepool {
                // dictionary of preprocessor macros
                NSMutableDictionary * prep = [NSMutableDictionary dictionary];

#ifdef GGML_QKK_64
                prep[@"GGML_QKK_64"] = @(1);
#endif

                MTLCompileOptions * options = [MTLCompileOptions new];
                options.preprocessorMacros = prep;

                //[options setFastMathEnabled:false];

                metal_library = [ctx->device newLibraryWithSource:src options:options error:&error];
                if (error) {
                    GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                    return NULL;
                }
            }
        }
    }

    // print MTL GPU family:
    GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);

    const NSInteger MTLGPUFamilyMetal3 = 5001;

    // determine max supported GPU family
    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
    {
        for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyMetal3 + 5; i >= MTLGPUFamilyMetal3; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3 + 3, i);
                break;
            }
        }
    }

    ctx->support_simdgroup_reduction = [ctx->device supportsFamily:MTLGPUFamilyApple7];
    ctx->support_simdgroup_reduction |= [ctx->device supportsFamily:MTLGPUFamilyMetal3];

    ctx->support_simdgroup_mm = [ctx->device supportsFamily:MTLGPUFamilyApple7];

    GGML_METAL_LOG_INFO("%s: simdgroup reduction support = %s\n", __func__, ctx->support_simdgroup_reduction ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: simdgroup matrix mul. support = %s\n", __func__, ctx->support_simdgroup_mm ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");

    ctx->should_capture_next_compute = false;

#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1e6);
    }
#elif TARGET_OS_OSX
    if (ctx->device.maxTransferRate != 0) {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1e6);
    } else {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__);
    }
#endif

    // load kernels
    {
        NSError * error = nil;

        for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
            ctx->kernels[i].pipeline = nil;
        }

        /*
        GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
                (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
                (int) kernel->pipeline.threadExecutionWidth); \
        */
#define GGML_METAL_ADD_KERNEL(e, name, supported) \
        if (supported) { \
            struct ggml_metal_kernel * kernel = &ctx->kernels[e]; \
            id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_"#name]; \
            kernel->pipeline = [ctx->device newComputePipelineStateWithFunction:metal_function error:&error]; \
            [metal_function release]; \
            if (error) { \
                GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
                [metal_library release]; \
                return NULL; \
            } \
        } else { \
            GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \
        }
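
        // When a kernel is not supported on the current device, its pipeline stays nil
        // and a warning is logged; ggml_metal_supports_op() below keeps such ops from
        // being dispatched in the first place.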
        // simd_sum and simd_max require MTLGPUFamilyApple7
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP, clamp, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK_4, gelu_quick_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU_4, silu_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M, get_rows_iq1_m, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32, mul_mv_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
    }

    [metal_library release];

    return ctx;
}

static void ggml_metal_free(struct ggml_metal_context * ctx) {
    GGML_METAL_LOG_INFO("%s: deallocating\n", __func__);

    for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
        [ctx->kernels[i].pipeline release];
    }

    [ctx->queue release];
    [ctx->device release];

    dispatch_release(ctx->d_queue);

    free(ctx);
}

// temporarily defined here for compatibility between ggml-backend and the old API
struct ggml_backend_metal_buffer {
    void * data;
    size_t size;

    id<MTLBuffer> metal;
};

struct ggml_backend_metal_buffer_context {
    void * all_data;
    size_t all_size;
    bool owned;

    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
    int n_buffers;
    struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
};

// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_tensor * t, size_t * offs) {
    //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;

    struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context;

    // find the view that contains the tensor fully
    for (int i = 0; i < buf_ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data;

        //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size);
        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //GGML_METAL_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);

            return buf_ctx->buffers[i].metal;
        }
    }

    GGML_METAL_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);

    return nil;
}
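
// Worked example: if a tensor's host pointer lies 4096 bytes past buffers[2].data and
// ggml_nbytes(t) is 1024, the loop above returns buffers[2].metal with *offs == 4096
// (since 4096 + 1024 <= buffers[2].size); the caller then binds that MTLBuffer with
// this byte offset when encoding the kernel arguments.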

static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
        case GGML_OP_CONCAT:
        case GGML_OP_ADD:
        case GGML_OP_ACC:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SCALE:
        case GGML_OP_CLAMP:
        case GGML_OP_SQR:
        case GGML_OP_SUM_ROWS:
            return true;
        case GGML_OP_SOFT_MAX:
        case GGML_OP_RMS_NORM:
        case GGML_OP_GROUP_NORM:
            return ctx->support_simdgroup_reduction;
        case GGML_OP_NORM:
        case GGML_OP_ALIBI:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
            return true;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            return false;
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_ARGSORT:
        case GGML_OP_LEAKY_RELU:
            return true;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
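            // matrix multiplication needs simdgroup reductions; additionally, if src0 is
            // already F32 (i.e. neither quantized nor F16), src1 must be F32 as well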
            return ctx->support_simdgroup_reduction &&
                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_CONT:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                            case GGML_TYPE_Q8_0:
                            case GGML_TYPE_Q4_0:
                            case GGML_TYPE_Q4_1:
                            case GGML_TYPE_Q5_0:
                            case GGML_TYPE_Q5_1:
                            case GGML_TYPE_IQ4_NL:
                                return true;
                            default:
                                return false;
                        }
                    case GGML_TYPE_F16:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                                return true;
                            default:
                                return false;
                        }
                    default:
                        return false;
                };
            }
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_GET_ROWS:
            {
                return op->ne[3] == 1;
            }
        default:
            return false;
    }
}

static enum ggml_status ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf) {

    @autoreleasepool {
    MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
    edesc.dispatchType = MTLDispatchTypeSerial;

    // create multiple command buffers and enqueue them
    // then, we encode the graph into the command buffers in parallel
    const int n_nodes = gf->n_nodes;
    const int n_cb = ctx->n_cb;
    const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;
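    // e.g. with 100 graph nodes and n_cb = 3, n_nodes_per_cb is ceil(100/3) = 34:
    // command buffers 0 and 1 each encode 34 nodes and the last one encodes the
    // remaining 32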

    const bool should_capture = ctx->should_capture_next_compute;
    if (should_capture) {
        ctx->should_capture_next_compute = false;

        MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
        descriptor.captureObject = ctx->queue;

        NSError * error = nil;
        if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
            GGML_METAL_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
            GGML_ASSERT(!"capture failed");
        }
    }

    id<MTLCommandBuffer> command_buffer_builder[n_cb];
    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        id<MTLCommandBuffer> command_buffer = [ctx->queue commandBufferWithUnretainedReferences];
        command_buffer_builder[cb_idx] = command_buffer;

        // enqueue the command buffers in order to specify their execution order
        [command_buffer enqueue];
    }

    const id<MTLCommandBuffer> * command_buffers = command_buffer_builder;
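
    // each block below encodes its slice of the graph into its own (already enqueued)
    // command buffer; the blocks run concurrently on ctx->d_queue, while Metal executes
    // the command buffers in the order they were enqueued above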
  738. dispatch_apply(n_cb, ctx->d_queue, ^(size_t iter) {
  739. const int cb_idx = iter;
  740. size_t offs_src0 = 0;
  741. size_t offs_src1 = 0;
  742. size_t offs_src2 = 0;
  743. size_t offs_dst = 0;
  744. id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx];
  745. id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
  746. const int node_start = (cb_idx + 0) * n_nodes_per_cb;
  747. const int node_end = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);
  748. for (int i = node_start; i < node_end; ++i) {
  749. if (i == -1) {
  750. [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
  751. continue;
  752. }
  753. //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));
  754. struct ggml_tensor * src0 = gf->nodes[i]->src[0];
  755. struct ggml_tensor * src1 = gf->nodes[i]->src[1];
  756. struct ggml_tensor * src2 = gf->nodes[i]->src[2];
  757. struct ggml_tensor * dst = gf->nodes[i];
  758. if (ggml_is_empty(dst)) {
  759. continue;
  760. }
  761. switch (dst->op) {
  762. case GGML_OP_NONE:
  763. case GGML_OP_RESHAPE:
  764. case GGML_OP_VIEW:
  765. case GGML_OP_TRANSPOSE:
  766. case GGML_OP_PERMUTE:
  767. {
  768. // noop -> next node
  769. } continue;
  770. default:
  771. {
  772. } break;
  773. }
  774. if (!ggml_metal_supports_op(ctx, dst)) {
  775. GGML_METAL_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
  776. GGML_ASSERT(!"unsupported op");
  777. }
  778. if (should_capture) {
  779. [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(dst) encoding:NSUTF8StringEncoding]];
  780. }
  781. const int64_t ne00 = src0 ? src0->ne[0] : 0;
  782. const int64_t ne01 = src0 ? src0->ne[1] : 0;
  783. const int64_t ne02 = src0 ? src0->ne[2] : 0;
  784. const int64_t ne03 = src0 ? src0->ne[3] : 0;
  785. const uint64_t nb00 = src0 ? src0->nb[0] : 0;
  786. const uint64_t nb01 = src0 ? src0->nb[1] : 0;
  787. const uint64_t nb02 = src0 ? src0->nb[2] : 0;
  788. const uint64_t nb03 = src0 ? src0->nb[3] : 0;
  789. const int64_t ne10 = src1 ? src1->ne[0] : 0;
  790. const int64_t ne11 = src1 ? src1->ne[1] : 0;
  791. const int64_t ne12 = src1 ? src1->ne[2] : 0;
  792. const int64_t ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);
  793. const uint64_t nb10 = src1 ? src1->nb[0] : 0;
  794. const uint64_t nb11 = src1 ? src1->nb[1] : 0;
  795. const uint64_t nb12 = src1 ? src1->nb[2] : 0;
  796. const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);
  797. const int64_t ne0 = dst ? dst->ne[0] : 0;
  798. const int64_t ne1 = dst ? dst->ne[1] : 0;
  799. const int64_t ne2 = dst ? dst->ne[2] : 0;
  800. const int64_t ne3 = dst ? dst->ne[3] : 0;
  801. const uint64_t nb0 = dst ? dst->nb[0] : 0;
  802. const uint64_t nb1 = dst ? dst->nb[1] : 0;
  803. const uint64_t nb2 = dst ? dst->nb[2] : 0;
  804. const uint64_t nb3 = dst ? dst->nb[3] : 0;
  805. const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
  806. const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
  807. const enum ggml_type dstt = dst ? dst->type : GGML_TYPE_COUNT;
  808. id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(src0, &offs_src0) : nil;
  809. id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(src1, &offs_src1) : nil;
  810. id<MTLBuffer> id_src2 = src2 ? ggml_metal_get_buffer(src2, &offs_src2) : nil;
  811. id<MTLBuffer> id_dst = dst ? ggml_metal_get_buffer(dst, &offs_dst) : nil;
  812. //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op));
  813. //if (src0) {
  814. // GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
  815. // ggml_is_contiguous(src0), src0->name);
  816. //}
  817. //if (src1) {
  818. // GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
  819. // ggml_is_contiguous(src1), src1->name);
  820. //}
  821. //if (dst) {
  822. // GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
  823. // dst->name);
  824. //}
  825. switch (dst->op) {
  826. case GGML_OP_CONCAT:
  827. {
  828. const int64_t nb = ne00;
  829. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONCAT].pipeline;
  830. [encoder setComputePipelineState:pipeline];
  831. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  832. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  833. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  834. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  835. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  836. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  837. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  838. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  839. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
  840. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
  841. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
  842. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  843. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  844. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  845. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  846. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  847. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  848. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  849. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  850. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  851. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  852. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  853. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  854. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  855. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
  856. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
  857. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
  858. [encoder setBytes:&nb length:sizeof(nb) atIndex:27];
  859. const int nth = MIN(1024, ne0);
  860. [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  861. } break;
  862. case GGML_OP_ADD:
  863. case GGML_OP_MUL:
  864. case GGML_OP_DIV:
  865. {
  866. const size_t offs = 0;
  867. bool bcast_row = false;
  868. int64_t nb = ne00;
  869. id<MTLComputePipelineState> pipeline = nil;
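// fast path: when src1 is a single contiguous row and both row sizes are
// multiples of 4, the *_ROW kernels broadcast src1 over every row of src0 and
// work on float4 vectors; nb then carries ne00/4 instead of ne00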
  870. if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) {
  871. GGML_ASSERT(ggml_is_contiguous(src0));
  872. // src1 is a row
  873. GGML_ASSERT(ne11 == 1);
  874. nb = ne00 / 4;
  875. switch (dst->op) {
  876. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW].pipeline; break;
  877. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW].pipeline; break;
  878. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW].pipeline; break;
  879. default: GGML_ASSERT(false);
  880. }
  881. bcast_row = true;
  882. } else {
  883. switch (dst->op) {
  884. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline; break;
  885. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break;
  886. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break;
  887. default: GGML_ASSERT(false);
  888. }
  889. }
  890. [encoder setComputePipelineState:pipeline];
  891. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  892. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  893. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  894. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  895. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  896. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  897. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  898. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  899. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
  900. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
  901. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
  902. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  903. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  904. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  905. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  906. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  907. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  908. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  909. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  910. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  911. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  912. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  913. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  914. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  915. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
  916. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
  917. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
  918. [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
  919. [encoder setBytes:&nb length:sizeof(nb) atIndex:28];
  920. if (bcast_row) {
  921. const int64_t n = ggml_nelements(dst)/4;
  922. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  923. } else {
  924. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
  925. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  926. }
  927. } break;
  928. case GGML_OP_ACC:
  929. {
  930. GGML_ASSERT(src0t == GGML_TYPE_F32);
  931. GGML_ASSERT(src1t == GGML_TYPE_F32);
  932. GGML_ASSERT(dstt == GGML_TYPE_F32);
  933. GGML_ASSERT(ggml_is_contiguous(src0));
  934. GGML_ASSERT(ggml_is_contiguous(src1));
  935. const size_t pnb1 = ((int32_t *) dst->op_params)[0];
  936. const size_t pnb2 = ((int32_t *) dst->op_params)[1];
  937. const size_t pnb3 = ((int32_t *) dst->op_params)[2];
  938. const size_t offs = ((int32_t *) dst->op_params)[3];
  939. const bool inplace = (bool) ((int32_t *) dst->op_params)[4];
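// ACC adds src1 into a view of dst described by op_params: first (optionally)
// copy src0 into dst, then run the ADD kernel using the view strides
// (pnb1/pnb2/pnb3) and byte offset offs in place of dst's own strides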
  940. if (!inplace) {
941. // run a separate kernel to cpy src->dst
  942. // not sure how to avoid this
  943. // TODO: make a simpler cpy_bytes kernel
  944. const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;
  945. [encoder setComputePipelineState:pipeline];
  946. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  947. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  948. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  949. [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
  950. [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
  951. [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
  952. [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
  953. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
  954. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
  955. [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
  956. [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
  957. [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
  958. [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
  959. [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
  960. [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
  961. [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
  962. [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
  963. [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
  964. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
  965. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  966. }
  967. const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;
  968. [encoder setComputePipelineState:pipeline];
  969. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  970. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  971. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  972. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  973. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  974. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  975. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  976. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  977. [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:8];
  978. [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:9];
  979. [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:10];
  980. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  981. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  982. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  983. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  984. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  985. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  986. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  987. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  988. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  989. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  990. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  991. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  992. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  993. [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:24];
  994. [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:25];
  995. [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:26];
  996. [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
  997. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
  998. [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  999. } break;
  1000. case GGML_OP_SCALE:
  1001. {
  1002. GGML_ASSERT(ggml_is_contiguous(src0));
  1003. float scale;
  1004. memcpy(&scale, dst->op_params, sizeof(scale));
  1005. int64_t n = ggml_nelements(dst);
  1006. id<MTLComputePipelineState> pipeline = nil;
  1007. if (n % 4 == 0) {
  1008. n /= 4;
  1009. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE_4].pipeline;
  1010. } else {
  1011. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE].pipeline;
  1012. }
  1013. [encoder setComputePipelineState:pipeline];
  1014. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1015. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1016. [encoder setBytes:&scale length:sizeof(scale) atIndex:2];
  1017. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1018. } break;
  1019. case GGML_OP_CLAMP:
  1020. {
  1021. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CLAMP].pipeline;
  1022. float min;
  1023. float max;
  1024. memcpy(&min, ((int32_t *) dst->op_params) + 0, sizeof(float));
  1025. memcpy(&max, ((int32_t *) dst->op_params) + 1, sizeof(float));
  1026. [encoder setComputePipelineState:pipeline];
  1027. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1028. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1029. [encoder setBytes:&min length:sizeof(min) atIndex:2];
  1030. [encoder setBytes:&max length:sizeof(max) atIndex:3];
  1031. const int64_t n = ggml_nelements(dst);
  1032. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1033. } break;
  1034. case GGML_OP_UNARY:
1035. // we are not taking into account the strides, so for now require contiguous tensors
1036. GGML_ASSERT(ggml_is_contiguous(src0));
1037. switch (ggml_get_unary_op(gf->nodes[i])) {
  1038. case GGML_UNARY_OP_TANH:
  1039. {
  1040. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TANH].pipeline;
  1041. [encoder setComputePipelineState:pipeline];
  1042. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1043. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1044. const int64_t n = ggml_nelements(dst);
  1045. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1046. } break;
  1047. case GGML_UNARY_OP_RELU:
  1048. {
  1049. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RELU].pipeline;
  1050. [encoder setComputePipelineState:pipeline];
  1051. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1052. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1053. const int64_t n = ggml_nelements(dst);
  1054. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1055. } break;
  1056. case GGML_UNARY_OP_GELU:
  1057. {
  1058. int64_t n = ggml_nelements(dst);
  1059. id<MTLComputePipelineState> pipeline = nil;
  1060. if (n % 4 == 0) {
  1061. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_4].pipeline;
  1062. n /= 4;
  1063. } else {
  1064. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU].pipeline;
  1065. }
  1066. [encoder setComputePipelineState:pipeline];
  1067. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1068. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1069. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1070. } break;
  1071. case GGML_UNARY_OP_GELU_QUICK:
  1072. {
  1073. int64_t n = ggml_nelements(dst);
  1074. id<MTLComputePipelineState> pipeline = nil;
  1075. if (n % 4 == 0) {
  1076. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK_4].pipeline;
  1077. n /= 4;
  1078. } else {
  1079. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK].pipeline;
  1080. }
  1081. [encoder setComputePipelineState:pipeline];
  1082. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1083. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1084. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1085. } break;
  1086. case GGML_UNARY_OP_SILU:
  1087. {
  1088. int64_t n = ggml_nelements(dst);
  1089. id<MTLComputePipelineState> pipeline = nil;
  1090. if (n % 4 == 0) {
  1091. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU_4].pipeline;
  1092. n /= 4;
  1093. } else {
  1094. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU].pipeline;
  1095. }
  1096. [encoder setComputePipelineState:pipeline];
  1097. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1098. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1099. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1100. } break;
  1101. default:
  1102. {
  1103. GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
  1104. GGML_ASSERT(false);
  1105. }
  1106. } break;
  1107. case GGML_OP_SQR:
  1108. {
  1109. GGML_ASSERT(ggml_is_contiguous(src0));
  1110. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQR].pipeline;
  1111. [encoder setComputePipelineState:pipeline];
  1112. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1113. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1114. const int64_t n = ggml_nelements(dst);
  1115. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1116. } break;
  1117. case GGML_OP_SUM_ROWS:
  1118. {
  1119. GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
  1120. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
  1121. [encoder setComputePipelineState:pipeline];
  1122. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1123. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1124. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
  1125. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
  1126. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
  1127. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
  1128. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
  1129. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
  1130. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
  1131. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
  1132. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
  1133. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11];
  1134. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
  1135. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
  1136. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
  1137. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
  1138. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
  1139. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17];
  1140. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:18];
  1141. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:19];
  1142. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:20];
  1143. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:21];
  1144. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:22];
  1145. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:23];
  1146. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:24];
  1147. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:25];
  1148. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1149. } break;
  1150. case GGML_OP_SOFT_MAX:
  1151. {
  1152. int nth = 32; // SIMD width
  1153. id<MTLComputePipelineState> pipeline = nil;
  1154. if (ne00%4 == 0) {
  1155. while (nth < ne00/4 && nth < 256) {
  1156. nth *= 2;
  1157. }
  1158. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline;
  1159. } else {
  1160. while (nth < ne00 && nth < 1024) {
  1161. nth *= 2;
  1162. }
  1163. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline;
  1164. }
  1165. float scale;
  1166. float max_bias;
  1167. memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(scale));
  1168. memcpy(&max_bias, ((int32_t *) dst->op_params) + 1, sizeof(max_bias));
  1169. const int64_t nrows_x = ggml_nrows(src0);
  1170. const int64_t nrows_y = src0->ne[1];
  1171. const uint32_t n_head_kv = nrows_x/nrows_y;
  1172. const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
  1173. const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
  1174. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
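// ALiBi slope bases: the kernel derives the per-head slope from these using the
// usual ALiBi scheme, i.e. heads below n_head_log2 use powf(m0, h + 1) and the
// remaining heads use powf(m1, 2*(h - n_head_log2) + 1)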
  1175. [encoder setComputePipelineState:pipeline];
  1176. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1177. if (id_src1) {
  1178. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1179. } else {
  1180. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
  1181. }
  1182. if (id_src2) {
  1183. [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
  1184. } else {
  1185. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
  1186. }
  1187. [encoder setBuffer:id_dst offset:offs_dst atIndex:3];
  1188. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
  1189. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
  1190. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
  1191. [encoder setBytes:&scale length:sizeof(scale) atIndex:7];
  1192. [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
  1193. [encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
  1194. [encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
  1195. [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
  1196. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
  1197. [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  1198. } break;
  1199. case GGML_OP_DIAG_MASK_INF:
  1200. {
  1201. const int n_past = ((int32_t *)(dst->op_params))[0];
  1202. id<MTLComputePipelineState> pipeline = nil;
  1203. if (ne00%8 == 0) {
  1204. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8].pipeline;
  1205. } else {
  1206. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline;
  1207. }
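// the _8 variant masks 8 consecutive elements per thread, so the grid below is
// collapsed to ne00*ne01*ne02/8; the scalar variant uses one thread per element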
  1208. [encoder setComputePipelineState:pipeline];
  1209. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1210. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1211. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
  1212. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
  1213. [encoder setBytes:&n_past length:sizeof(int) atIndex:4];
  1214. if (ne00%8 == 0) {
  1215. [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1216. }
  1217. else {
  1218. [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1219. }
  1220. } break;
  1221. case GGML_OP_MUL_MAT:
  1222. {
  1223. GGML_ASSERT(ne00 == ne10);
  1224. // TODO: assert that dim2 and dim3 are contiguous
  1225. GGML_ASSERT(ne12 % ne02 == 0);
  1226. GGML_ASSERT(ne13 % ne03 == 0);
  1227. const uint r2 = ne12/ne02;
  1228. const uint r3 = ne13/ne03;
  1229. // find the break-even point where the matrix-matrix kernel becomes more efficient compared
  1230. // to the matrix-vector kernel
  1231. int ne11_mm_min = 1;
  1232. #if 0
  1233. // the numbers below are measured on M2 Ultra for 7B and 13B models
  1234. // these numbers do not translate to other devices or model sizes
  1235. // TODO: need to find a better approach
  1236. if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
  1237. switch (src0t) {
  1238. case GGML_TYPE_F16: ne11_mm_min = 2; break;
  1239. case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
  1240. case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
  1241. case GGML_TYPE_Q3_K: ne11_mm_min = 7; break;
  1242. case GGML_TYPE_Q4_0:
  1243. case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
  1244. case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
  1245. case GGML_TYPE_Q5_0: // not tested yet
  1246. case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
  1247. case GGML_TYPE_Q5_K: ne11_mm_min = 7; break;
  1248. case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
  1249. default: ne11_mm_min = 1; break;
  1250. }
  1251. }
  1252. #endif
  1253. // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
  1254. // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
  1255. if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
  1256. !ggml_is_transposed(src0) &&
  1257. !ggml_is_transposed(src1) &&
  1258. src1t == GGML_TYPE_F32 &&
  1259. ne00 % 32 == 0 && ne00 >= 64 &&
  1260. (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
  1261. //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1262. // some Metal matrix data types require aligned pointers
  1263. // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
  1264. switch (src0->type) {
  1265. case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
  1266. case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
  1267. default: break;
  1268. }
  1269. id<MTLComputePipelineState> pipeline = nil;
  1270. switch (src0->type) {
  1271. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32 ].pipeline; break;
  1272. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32 ].pipeline; break;
  1273. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32 ].pipeline; break;
  1274. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32 ].pipeline; break;
  1275. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32 ].pipeline; break;
  1276. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32 ].pipeline; break;
  1277. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32 ].pipeline; break;
  1278. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32 ].pipeline; break;
  1279. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32 ].pipeline; break;
  1280. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32 ].pipeline; break;
  1281. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32 ].pipeline; break;
  1282. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32 ].pipeline; break;
  1283. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break;
  1284. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break;
  1285. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break;
  1286. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32 ].pipeline; break;
  1287. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32 ].pipeline; break;
  1288. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32 ].pipeline; break;
  1289. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break;
  1290. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
  1291. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
  1292. default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
  1293. }
  1294. [encoder setComputePipelineState:pipeline];
  1295. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1296. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1297. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1298. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  1299. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
  1300. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
  1301. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
  1302. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
  1303. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:8];
  1304. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:9];
  1305. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:10];
  1306. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:11];
  1307. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:12];
  1308. [encoder setBytes:&r2 length:sizeof(r2) atIndex:13];
  1309. [encoder setBytes:&r3 length:sizeof(r3) atIndex:14];
  1310. [encoder setThreadgroupMemoryLength:8192 atIndex:0];
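// each 128-thread threadgroup computes a 64 (src0 rows) x 32 (src1 rows) tile of
// the result, staging the input blocks in the 8 kB of threadgroup memory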
  1311. [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
  1312. } else {
  1313. int nth0 = 32;
  1314. int nth1 = 1;
  1315. int nrows = 1;
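// mat-vec path: nth0 x nth1 is the threadgroup shape expected by the selected
// kernel and nrows is the number of src1 rows a threadgroup reduces (used only
// by the F32/F16 kernels); both depend on the src0 type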
  1316. //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1317. id<MTLComputePipelineState> pipeline = nil;
  1318. // use custom matrix x vector kernel
  1319. switch (src0t) {
  1320. case GGML_TYPE_F32:
  1321. {
  1322. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1323. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline;
  1324. nrows = 4;
  1325. } break;
  1326. case GGML_TYPE_F16:
  1327. {
  1328. nth0 = 32;
  1329. nth1 = 1;
  1330. if (src1t == GGML_TYPE_F32) {
  1331. if (ne11 * ne12 < 4) {
  1332. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline;
  1333. } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
  1334. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline;
  1335. nrows = ne11;
  1336. } else {
  1337. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline;
  1338. nrows = 4;
  1339. }
  1340. } else {
  1341. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline;
  1342. nrows = 4;
  1343. }
  1344. } break;
  1345. case GGML_TYPE_Q4_0:
  1346. {
  1347. nth0 = 8;
  1348. nth1 = 8;
  1349. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline;
  1350. } break;
  1351. case GGML_TYPE_Q4_1:
  1352. {
  1353. nth0 = 8;
  1354. nth1 = 8;
  1355. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline;
  1356. } break;
  1357. case GGML_TYPE_Q5_0:
  1358. {
  1359. nth0 = 8;
  1360. nth1 = 8;
  1361. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline;
  1362. } break;
  1363. case GGML_TYPE_Q5_1:
  1364. {
  1365. nth0 = 8;
  1366. nth1 = 8;
  1367. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline;
  1368. } break;
  1369. case GGML_TYPE_Q8_0:
  1370. {
  1371. nth0 = 8;
  1372. nth1 = 8;
  1373. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline;
  1374. } break;
  1375. case GGML_TYPE_Q2_K:
  1376. {
  1377. nth0 = 2;
  1378. nth1 = 32;
  1379. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline;
  1380. } break;
  1381. case GGML_TYPE_Q3_K:
  1382. {
  1383. nth0 = 2;
  1384. nth1 = 32;
  1385. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline;
  1386. } break;
  1387. case GGML_TYPE_Q4_K:
  1388. {
  1389. nth0 = 4; //1;
  1390. nth1 = 8; //32;
  1391. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline;
  1392. } break;
  1393. case GGML_TYPE_Q5_K:
  1394. {
  1395. nth0 = 2;
  1396. nth1 = 32;
  1397. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline;
  1398. } break;
  1399. case GGML_TYPE_Q6_K:
  1400. {
  1401. nth0 = 2;
  1402. nth1 = 32;
  1403. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline;
  1404. } break;
  1405. case GGML_TYPE_IQ2_XXS:
  1406. {
  1407. nth0 = 4;
  1408. nth1 = 16;
  1409. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline;
  1410. } break;
  1411. case GGML_TYPE_IQ2_XS:
  1412. {
  1413. nth0 = 4;
  1414. nth1 = 16;
  1415. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline;
  1416. } break;
  1417. case GGML_TYPE_IQ3_XXS:
  1418. {
  1419. nth0 = 4;
  1420. nth1 = 16;
  1421. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline;
  1422. } break;
  1423. case GGML_TYPE_IQ3_S:
  1424. {
  1425. nth0 = 4;
  1426. nth1 = 16;
  1427. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline;
  1428. } break;
  1429. case GGML_TYPE_IQ2_S:
  1430. {
  1431. nth0 = 4;
  1432. nth1 = 16;
  1433. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline;
  1434. } break;
  1435. case GGML_TYPE_IQ1_S:
  1436. {
  1437. nth0 = 4;
  1438. nth1 = 16;
  1439. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline;
  1440. } break;
  1441. case GGML_TYPE_IQ1_M:
  1442. {
  1443. nth0 = 4;
  1444. nth1 = 16;
  1445. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32].pipeline;
  1446. } break;
  1447. case GGML_TYPE_IQ4_NL:
  1448. {
  1449. nth0 = 4;
  1450. nth1 = 16;
  1451. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline;
  1452. } break;
  1453. case GGML_TYPE_IQ4_XS:
  1454. {
  1455. nth0 = 4;
  1456. nth1 = 16;
  1457. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline;
  1458. } break;
  1459. default:
  1460. {
  1461. GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
  1462. GGML_ASSERT(false && "not implemented");
  1463. }
  1464. };
  1465. if (ggml_is_quantized(src0t)) {
  1466. GGML_ASSERT(ne00 >= nth0*nth1);
  1467. }
  1468. [encoder setComputePipelineState:pipeline];
  1469. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1470. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1471. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1472. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  1473. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  1474. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  1475. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
  1476. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
  1477. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
  1478. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
  1479. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
  1480. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
  1481. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
  1482. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
  1483. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
  1484. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
  1485. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
  1486. [encoder setBytes:&r2 length:sizeof(r2) atIndex:17];
  1487. [encoder setBytes:&r3 length:sizeof(r3) atIndex:18];
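// the dispatch grid depends on how many src0 rows each threadgroup of the
// selected kernel produces (8, 4 or 2 depending on the type); the IQ kernels
// additionally need threadgroup memory for their codebook/lookup tables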
  1488. if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 ||
  1489. src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K ||
  1490. src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S) {
  1491. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1492. }
  1493. else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
  1494. const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
  1495. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1496. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1497. }
  1498. else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) {
  1499. const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
  1500. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1501. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1502. }
  1503. else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) {
  1504. const int mem_size = 32*sizeof(float);
  1505. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1506. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1507. }
  1508. else if (src0t == GGML_TYPE_Q4_K) {
  1509. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1510. }
  1511. else if (src0t == GGML_TYPE_Q3_K) {
  1512. #ifdef GGML_QKK_64
  1513. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1514. #else
  1515. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1516. #endif
  1517. }
  1518. else if (src0t == GGML_TYPE_Q5_K) {
  1519. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1520. }
  1521. else if (src0t == GGML_TYPE_Q6_K) {
  1522. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1523. } else {
  1524. const int64_t ny = (ne11 + nrows - 1)/nrows;
  1525. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1526. }
  1527. }
  1528. } break;
  1529. case GGML_OP_MUL_MAT_ID:
  1530. {
  1531. const int n_as = src0->ne[2];
  1532. // src2 = ids
  1533. const int64_t ne20 = src2->ne[0];
  1534. const int64_t ne21 = src2->ne[1];
  1535. const int64_t ne22 = src2->ne[2]; GGML_UNUSED(ne22);
  1536. const int64_t ne23 = src2->ne[3]; GGML_UNUSED(ne23);
  1537. const uint64_t nb20 = src2->nb[0]; GGML_UNUSED(nb20);
  1538. const uint64_t nb21 = src2->nb[1];
  1539. const uint64_t nb22 = src2->nb[2]; GGML_UNUSED(nb22);
  1540. const uint64_t nb23 = src2->nb[3]; GGML_UNUSED(nb23);
  1541. const enum ggml_type src2t = src2->type; GGML_UNUSED(src2t);
  1542. GGML_ASSERT(src2t == GGML_TYPE_I32);
  1543. GGML_ASSERT(!ggml_is_transposed(src0));
  1544. GGML_ASSERT(!ggml_is_transposed(src1));
  1545. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1546. // find the break-even point where the matrix-matrix kernel becomes more efficient compared
  1547. // to the matrix-vector kernel
  1548. // ne20 = n_used_experts
  1549. // ne21 = n_rows
  1550. const int dst_rows = ne20*ne21;
  1551. const int dst_rows_min = n_as;
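// i.e. use the indirect matrix-matrix kernel only when there is more than one
// dst row per expert on average; otherwise fall back to the mat-vec kernels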
  1552. // max size of the rowids array in the kernel shared buffer
  1553. GGML_ASSERT(dst_rows <= 2048);
  1554. // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
  1555. // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
  1556. // !!!
  1557. // TODO: for now, always use mat-vec kernels until we figure out how to improve the
  1558. // indirect matrix multiplication
  1559. // !!!
  1560. if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
  1561. ne00 % 32 == 0 && ne00 >= 64 &&
  1562. dst_rows > dst_rows_min) {
  1563. // some Metal matrix data types require aligned pointers
  1564. // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
  1565. switch (src0->type) {
  1566. case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
  1567. case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
  1568. default: break;
  1569. }
  1570. id<MTLComputePipelineState> pipeline = nil;
  1571. switch (src0->type) {
  1572. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32 ].pipeline; break;
  1573. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32 ].pipeline; break;
  1574. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32 ].pipeline; break;
  1575. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32 ].pipeline; break;
  1576. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32 ].pipeline; break;
  1577. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32 ].pipeline; break;
  1578. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32 ].pipeline; break;
  1579. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32 ].pipeline; break;
  1580. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32 ].pipeline; break;
  1581. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32 ].pipeline; break;
  1582. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32 ].pipeline; break;
  1583. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32 ].pipeline; break;
  1584. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32].pipeline; break;
  1585. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32 ].pipeline; break;
  1586. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32].pipeline; break;
  1587. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32 ].pipeline; break;
  1588. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32 ].pipeline; break;
  1589. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32 ].pipeline; break;
  1590. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32 ].pipeline; break;
  1591. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32 ].pipeline; break;
  1592. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
  1593. default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
  1594. }
  1595. [encoder setComputePipelineState:pipeline];
  1596. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1597. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1598. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1599. [encoder setBuffer:id_src2 offset:offs_src2 atIndex:3];
  1600. [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
  1601. [encoder setBytes:&ne21 length:sizeof(ne21) atIndex:5];
  1602. [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:6];
  1603. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:7];
  1604. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:8];
  1605. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:9];
  1606. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:10];
  1607. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11];
  1608. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
  1609. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
  1610. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
  1611. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
  1612. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
  1613. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:17];
  1614. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:18];
  1615. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:19];
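// on top of the 8 kB used for the input tiles, the shared buffer also holds one
// ushort2 row id per dst row, hence the extra dst_rows*4 bytes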
  1616. [encoder setThreadgroupMemoryLength:GGML_PAD(8192 + dst_rows*4/*sizeof(ushort2)*/, 16) atIndex:0];
  1617. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 31)/32, (ne01 + 63)/64, n_as) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
  1618. } else {
  1619. int nth0 = 32;
  1620. int nth1 = 1;
  1621. int nrows = 1;
  1622. //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1623. id<MTLComputePipelineState> pipeline = nil;
  1624. // use custom matrix x vector kernel
  1625. switch (src0t) {
  1626. case GGML_TYPE_F32:
  1627. {
  1628. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1629. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32].pipeline;
  1630. } break;
  1631. case GGML_TYPE_F16:
  1632. {
  1633. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1634. nth0 = 32;
  1635. nth1 = 1;
  1636. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32].pipeline;
  1637. } break;
  1638. case GGML_TYPE_Q4_0:
  1639. {
  1640. nth0 = 8;
  1641. nth1 = 8;
  1642. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32].pipeline;
  1643. } break;
  1644. case GGML_TYPE_Q4_1:
  1645. {
  1646. nth0 = 8;
  1647. nth1 = 8;
  1648. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32].pipeline;
  1649. } break;
  1650. case GGML_TYPE_Q5_0:
  1651. {
  1652. nth0 = 8;
  1653. nth1 = 8;
  1654. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32].pipeline;
  1655. } break;
  1656. case GGML_TYPE_Q5_1:
  1657. {
  1658. nth0 = 8;
  1659. nth1 = 8;
  1660. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32].pipeline;
  1661. } break;
  1662. case GGML_TYPE_Q8_0:
  1663. {
  1664. nth0 = 8;
  1665. nth1 = 8;
  1666. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline;
  1667. } break;
  1668. case GGML_TYPE_Q2_K:
  1669. {
  1670. nth0 = 2;
  1671. nth1 = 32;
  1672. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32].pipeline;
  1673. } break;
  1674. case GGML_TYPE_Q3_K:
  1675. {
  1676. nth0 = 2;
  1677. nth1 = 32;
  1678. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32].pipeline;
  1679. } break;
  1680. case GGML_TYPE_Q4_K:
  1681. {
  1682. nth0 = 4; //1;
  1683. nth1 = 8; //32;
  1684. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32].pipeline;
  1685. } break;
  1686. case GGML_TYPE_Q5_K:
  1687. {
  1688. nth0 = 2;
  1689. nth1 = 32;
  1690. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32].pipeline;
  1691. } break;
  1692. case GGML_TYPE_Q6_K:
  1693. {
  1694. nth0 = 2;
  1695. nth1 = 32;
  1696. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32].pipeline;
  1697. } break;
  1698. case GGML_TYPE_IQ2_XXS:
  1699. {
  1700. nth0 = 4;
  1701. nth1 = 16;
  1702. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32].pipeline;
  1703. } break;
  1704. case GGML_TYPE_IQ2_XS:
  1705. {
  1706. nth0 = 4;
  1707. nth1 = 16;
  1708. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32].pipeline;
  1709. } break;
  1710. case GGML_TYPE_IQ3_XXS:
  1711. {
  1712. nth0 = 4;
  1713. nth1 = 16;
  1714. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32].pipeline;
  1715. } break;
  1716. case GGML_TYPE_IQ3_S:
  1717. {
  1718. nth0 = 4;
  1719. nth1 = 16;
  1720. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32].pipeline;
  1721. } break;
  1722. case GGML_TYPE_IQ2_S:
  1723. {
  1724. nth0 = 4;
  1725. nth1 = 16;
  1726. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32].pipeline;
  1727. } break;
  1728. case GGML_TYPE_IQ1_S:
  1729. {
  1730. nth0 = 4;
  1731. nth1 = 16;
  1732. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32].pipeline;
  1733. } break;
  1734. case GGML_TYPE_IQ1_M:
  1735. {
  1736. nth0 = 4;
  1737. nth1 = 16;
  1738. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32].pipeline;
  1739. } break;
  1740. case GGML_TYPE_IQ4_NL:
  1741. {
  1742. nth0 = 4;
  1743. nth1 = 16;
  1744. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
  1745. } break;
  1746. case GGML_TYPE_IQ4_XS:
  1747. {
  1748. nth0 = 4;
  1749. nth1 = 16;
  1750. #if QK_K == 64
  1751. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
  1752. #else
  1753. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
  1754. #endif
  1755. } break;
  1756. default:
  1757. {
1758. GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
  1759. GGML_ASSERT(false && "not implemented");
  1760. }
  1761. };
  1762. if (ggml_is_quantized(src0t)) {
  1763. GGML_ASSERT(ne00 >= nth0*nth1);
  1764. }
  1765. [encoder setComputePipelineState:pipeline];
  1766. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1767. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1768. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1769. [encoder setBuffer:id_src2 offset:offs_src2 atIndex:3];
  1770. [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
  1771. [encoder setBytes:&ne21 length:sizeof(ne21) atIndex:5];
  1772. [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:6];
  1773. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:7];
  1774. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:8];
  1775. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:9];
  1776. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:10];
  1777. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:11];
  1778. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:12];
  1779. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:13];
  1780. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:14];
  1781. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:15];
  1782. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:16];
  1783. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:17];
  1784. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:18];
  1785. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:19];
  1786. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:20];
  1787. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:21];
  1788. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:22];
  1789. const int64_t _ne1 = 1;
  1790. const int tgz = dst_rows;
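// each z slice of the grid handles one (expert, dst row) pair, so the grid depth
// is dst_rows and the y extent collapses to _ne1 = 1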
  1791. if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 ||
  1792. src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K ||
  1793. src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S) {
  1794. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1795. }
  1796. else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
  1797. const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
  1798. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1799. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1800. }
  1801. else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) {
  1802. const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
  1803. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1804. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1805. }
  1806. else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) {
  1807. const int mem_size = 32*sizeof(float);
  1808. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1809. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1810. }
  1811. else if (src0t == GGML_TYPE_Q4_K) {
  1812. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1813. }
  1814. else if (src0t == GGML_TYPE_Q3_K) {
  1815. #ifdef GGML_QKK_64
  1816. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1817. #else
  1818. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1819. #endif
  1820. }
  1821. else if (src0t == GGML_TYPE_Q5_K) {
  1822. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1823. }
  1824. else if (src0t == GGML_TYPE_Q6_K) {
  1825. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1826. } else {
  1827. const int64_t ny = (_ne1 + nrows - 1)/nrows; // = _ne1
  1828. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1829. }
  1830. }
  1831. } break;
  1832. case GGML_OP_GET_ROWS:
  1833. {
  1834. id<MTLComputePipelineState> pipeline = nil;
  1835. switch (src0->type) {
  1836. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F32 ].pipeline; break;
  1837. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F16 ].pipeline; break;
  1838. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0 ].pipeline; break;
  1839. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1 ].pipeline; break;
  1840. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0 ].pipeline; break;
  1841. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1 ].pipeline; break;
  1842. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0 ].pipeline; break;
  1843. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K ].pipeline; break;
  1844. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K ].pipeline; break;
  1845. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K ].pipeline; break;
  1846. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K ].pipeline; break;
  1847. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K ].pipeline; break;
  1848. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS].pipeline; break;
  1849. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS ].pipeline; break;
  1850. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS].pipeline; break;
  1851. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S ].pipeline; break;
  1852. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S ].pipeline; break;
  1853. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S ].pipeline; break;
  1854. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M ].pipeline; break;
  1855. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
  1856. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
  1857. case GGML_TYPE_I32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_I32 ].pipeline; break;
  1858. default: GGML_ASSERT(false && "not implemented");
  1859. }
  1860. [encoder setComputePipelineState:pipeline];
  1861. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1862. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1863. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1864. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
  1865. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4];
  1866. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:5];
  1867. [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:6];
  1868. [encoder setBytes:&nb10 length:sizeof( int64_t) atIndex:7];
  1869. [encoder setBytes:&nb11 length:sizeof( int64_t) atIndex:8];
  1870. [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:9];
  1871. [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:10];
  1872. [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
  1873. } break;
  1874. case GGML_OP_RMS_NORM:
  1875. {
  1876. GGML_ASSERT(ne00 % 4 == 0);
  1877. float eps;
  1878. memcpy(&eps, dst->op_params, sizeof(float));
  1879. int nth = 32; // SIMD width
  1880. while (nth < ne00/4 && nth < 1024) {
  1881. nth *= 2;
  1882. }
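// one threadgroup per row: nth is a power of two capped at 1024, and the 32
// floats of threadgroup memory hold one partial per simdgroup of the reduction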
  1883. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM].pipeline;
  1884. [encoder setComputePipelineState:pipeline];
  1885. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1886. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1887. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  1888. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
  1889. [encoder setBytes:&eps length:sizeof( float) atIndex:4];
  1890. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
  1891. const int64_t nrows = ggml_nrows(src0);
  1892. [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  1893. } break;
  1894. case GGML_OP_GROUP_NORM:
  1895. {
  1896. GGML_ASSERT(ne00 % 4 == 0);
  1897. //float eps;
  1898. //memcpy(&eps, dst->op_params, sizeof(float));
  1899. const float eps = 1e-6f; // TODO: temporarily hardcoded
  1900. const int32_t n_groups = ((int32_t *) dst->op_params)[0];
  1901. int nth = 32; // SIMD width
  1902. //while (nth < ne00/4 && nth < 1024) {
  1903. // nth *= 2;
  1904. //}
  1905. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline;
  1906. [encoder setComputePipelineState:pipeline];
  1907. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1908. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1909. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  1910. [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
  1911. [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
  1912. [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:5];
  1913. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:6];
  1914. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:7];
  1915. [encoder setBytes:&n_groups length:sizeof( int32_t) atIndex:8];
  1916. [encoder setBytes:&eps length:sizeof( float) atIndex:9];
  1917. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_NORM:
{
float eps;
memcpy(&eps, dst->op_params, sizeof(float));
const int nth = MIN(256, ne00);
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NORM].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
[encoder setBytes:&eps length:sizeof( float) atIndex:4];
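// the threadgroup memory length must be a multiple of 16 bytes, hence the GGML_PAD below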
[encoder setThreadgroupMemoryLength:GGML_PAD(nth*sizeof(float), 16) atIndex:0];
const int64_t nrows = ggml_nrows(src0);
[encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ALIBI:
{
GGML_ASSERT((src0t == GGML_TYPE_F32));
const int nth = MIN(1024, ne00);
//const int n_past = ((int32_t *) dst->op_params)[0];
const int n_head = ((int32_t *) dst->op_params)[1];
float max_bias;
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
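// ALiBi slope schedule: head h uses m0^(h+1) for h < n_heads_log2_floor and m1^(2*(h - n_heads_log2_floor) + 1) otherwise,
// which mirrors the usual handling of head counts that are not a power of two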
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
[encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
[encoder setBytes:&m0 length:sizeof( float) atIndex:18];
[encoder setBytes:&m1 length:sizeof( float) atIndex:19];
[encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ROPE:
{
GGML_ASSERT(ne10 == ne02);
const int nth = MIN(1024, ne00);
const int n_past = ((int32_t *) dst->op_params)[0];
const int n_dims = ((int32_t *) dst->op_params)[1];
const int mode = ((int32_t *) dst->op_params)[2];
// skip 3, n_ctx, used in GLM RoPE, unimplemented in metal
const int n_orig_ctx = ((int32_t *) dst->op_params)[4];
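// the float parameters are stored bit-exact in op_params, so they are read back with memcpy rather than a cast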
float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
id<MTLComputePipelineState> pipeline = nil;
switch (src0->type) {
case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F32].pipeline; break;
case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F16].pipeline; break;
default: GGML_ASSERT(false);
};
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4];
[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5];
[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6];
[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7];
[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11];
[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12];
[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13];
[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14];
[encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15];
[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16];
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17];
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18];
[encoder setBytes:&n_past length:sizeof( int) atIndex:19];
[encoder setBytes:&n_dims length:sizeof( int) atIndex:20];
[encoder setBytes:&mode length:sizeof( int) atIndex:21];
[encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:22];
[encoder setBytes:&freq_base length:sizeof( float) atIndex:23];
[encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
[encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
[encoder setBytes:&attn_factor length:sizeof( float) atIndex:26];
[encoder setBytes:&beta_fast length:sizeof( float) atIndex:27];
[encoder setBytes:&beta_slow length:sizeof( float) atIndex:28];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_IM2COL:
{
GGML_ASSERT(src0->type == GGML_TYPE_F16);
GGML_ASSERT(src1->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);
const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
const int32_t d1 = ((const int32_t *)(dst->op_params))[5];
const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;
const int32_t N = src1->ne[is_2D ? 3 : 2];
const int32_t IC = src1->ne[is_2D ? 2 : 1];
const int32_t IH = is_2D ? src1->ne[1] : 1;
const int32_t IW = src1->ne[0];
const int32_t KH = is_2D ? src0->ne[1] : 1;
const int32_t KW = src0->ne[0];
const int32_t OH = is_2D ? dst->ne[2] : 1;
const int32_t OW = dst->ne[1];
const int32_t CHW = IC * KH * KW;
const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4;
const int32_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4;
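// ofs0/ofs1 are batch/channel offsets into src1 in elements (nb is in bytes and src1 is F32, hence the division by 4)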
id<MTLComputePipelineState> pipeline = nil;
switch (dst->type) {
case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline; break;
case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F16].pipeline; break;
default: GGML_ASSERT(false);
};
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src1 offset:offs_src1 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ofs0 length:sizeof( int32_t) atIndex:2];
[encoder setBytes:&ofs1 length:sizeof( int32_t) atIndex:3];
[encoder setBytes:&IW length:sizeof( int32_t) atIndex:4];
[encoder setBytes:&IH length:sizeof( int32_t) atIndex:5];
[encoder setBytes:&CHW length:sizeof( int32_t) atIndex:6];
[encoder setBytes:&s0 length:sizeof( int32_t) atIndex:7];
[encoder setBytes:&s1 length:sizeof( int32_t) atIndex:8];
[encoder setBytes:&p0 length:sizeof( int32_t) atIndex:9];
[encoder setBytes:&p1 length:sizeof( int32_t) atIndex:10];
[encoder setBytes:&d0 length:sizeof( int32_t) atIndex:11];
[encoder setBytes:&d1 length:sizeof( int32_t) atIndex:12];
[encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)];
} break;
case GGML_OP_UPSCALE:
{
GGML_ASSERT(src0->type == GGML_TYPE_F32);
const int sf = dst->op_params[0];
const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
[encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
[encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
[encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
[encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
[encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10];
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11];
[encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12];
[encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13];
[encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14];
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
[encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
[encoder setBytes:&sf length:sizeof(sf) atIndex:18];
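// do not launch more threads per threadgroup than the pipeline allows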
const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
[encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_PAD:
{
GGML_ASSERT(src0->type == GGML_TYPE_F32);
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
[encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
[encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
[encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
[encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
[encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10];
[encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11];
[encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12];
[encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13];
[encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14];
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
[encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
const int nth = MIN(1024, ne0);
[encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ARANGE:
{
GGML_ASSERT(dst->type == GGML_TYPE_F32);
float start;
float step;
memcpy(&start, ((int32_t *) dst->op_params) + 0, sizeof(float));
memcpy(&step, ((int32_t *) dst->op_params) + 2, sizeof(float));
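// op_params holds { start, stop, step }; 'stop' (index 1) is implied by ne0 and is not passed to the kernel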
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARANGE_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_dst offset:offs_dst atIndex:0];
[encoder setBytes:&ne0 length:sizeof(ne0) atIndex:1];
[encoder setBytes:&start length:sizeof(start) atIndex:2];
[encoder setBytes:&step length:sizeof(step) atIndex:3];
const int nth = MIN(1024, ne0);
[encoder dispatchThreadgroups:MTLSizeMake(1, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_TIMESTEP_EMBEDDING:
{
GGML_ASSERT(src0->type == GGML_TYPE_F32);
const int dim = dst->op_params[0];
const int max_period = dst->op_params[1];
const int half = dim / 2;
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&nb1 length:sizeof(nb1) atIndex:2];
[encoder setBytes:&dim length:sizeof(dim) atIndex:3];
[encoder setBytes:&max_period length:sizeof(max_period) atIndex:4];
const int nth = MIN(1024, half);
[encoder dispatchThreadgroups:MTLSizeMake(ne00, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
case GGML_OP_ARGSORT:
{
GGML_ASSERT(src0->type == GGML_TYPE_F32);
GGML_ASSERT( dst->type == GGML_TYPE_I32);
const int nrows = ggml_nrows(src0);
enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];
// bitonic sort requires the number of elements to be power of 2
int64_t ne00_padded = 1;
while (ne00_padded < ne00) {
ne00_padded *= 2;
}
// Metal kernels require the buffer size to be multiple of 16 bytes
// https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength
const int mem_size = GGML_PAD(ne00_padded*sizeof(int32_t), 16);
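// a single threadgroup sorts an entire row, so each row needs ne00_padded threads and mem_size bytes of shared memory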
id<MTLComputePipelineState> pipeline = nil;
switch (order) {
case GGML_SORT_ORDER_ASC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC].pipeline; break;
case GGML_SORT_ORDER_DESC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC].pipeline; break;
default: GGML_ASSERT(false);
};
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
[encoder setBytes:&ne00_padded length:sizeof( int64_t) atIndex:3];
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00_padded, 1, 1)];
} break;
case GGML_OP_LEAKY_RELU:
{
GGML_ASSERT(src0->type == GGML_TYPE_F32);
float slope;
memcpy(&slope, dst->op_params, sizeof(float));
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline;
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&slope length:sizeof(slope) atIndex:2];
const int64_t n = ggml_nelements(dst);
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
} break;
case GGML_OP_DUP:
case GGML_OP_CPY:
case GGML_OP_CONT:
{
GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);
int nth = MIN(1024, ne00/ggml_blck_size(src0->type));
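// one threadgroup per src0 row; the copy kernel is selected from the (src, dst) type pair below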
id<MTLComputePipelineState> pipeline = nil;
switch (src0t) {
case GGML_TYPE_F32:
{
GGML_ASSERT(ne0 % ggml_blck_size(dst->type) == 0);
switch (dstt) {
case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F16].pipeline; break;
case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline; break;
case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0].pipeline; break;
case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0].pipeline; break;
case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1].pipeline; break;
case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0].pipeline; break;
case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1].pipeline; break;
case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL].pipeline; break;
default: GGML_ASSERT(false && "not implemented");
};
} break;
case GGML_TYPE_F16:
{
switch (dstt) {
case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F16].pipeline; break;
case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F32].pipeline; break;
default: GGML_ASSERT(false && "not implemented");
};
} break;
default: GGML_ASSERT(false && "not implemented");
}
[encoder setComputePipelineState:pipeline];
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
[encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
} break;
default:
{
GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
GGML_ASSERT(false);
}
}
if (should_capture) {
[encoder popDebugGroup];
}
}
[encoder endEncoding];
[command_buffer commit];
});
// Wait for completion and check status of each command buffer
// needed to detect if the device ran out-of-memory for example (#1881)
for (int i = 0; i < n_cb; ++i) {
id<MTLCommandBuffer> command_buffer = command_buffers[i];
[command_buffer waitUntilCompleted];
MTLCommandBufferStatus status = [command_buffer status];
if (status != MTLCommandBufferStatusCompleted) {
GGML_METAL_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
return GGML_STATUS_FAILED;
}
}
if (should_capture) {
[[MTLCaptureManager sharedCaptureManager] stopCapture];
}
}
return GGML_STATUS_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
// backend interface
// default buffer
static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;
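// the default Metal device is created lazily and shared; each buffer/context holds a reference counted here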
static id<MTLDevice> ggml_backend_metal_get_device(void) {
if (g_backend_device == nil) {
g_backend_device = MTLCreateSystemDefaultDevice();
}
g_backend_device_ref_count++;
return g_backend_device;
}
static void ggml_backend_metal_free_device(void) {
assert(g_backend_device_ref_count > 0);
g_backend_device_ref_count--;
if (g_backend_device_ref_count == 0) {
[g_backend_device release];
g_backend_device = nil;
}
}
GGML_CALL static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
return "Metal";
UNUSED(buffer);
}
GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
for (int i = 0; i < ctx->n_buffers; i++) {
[ctx->buffers[i].metal release];
}
ggml_backend_metal_free_device();
if (ctx->owned) {
free(ctx->all_data);
}
free(ctx);
}
GGML_CALL static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
return ctx->all_data;
}
GGML_CALL static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
memcpy((char *)tensor->data + offset, data, size);
UNUSED(buffer);
}
GGML_CALL static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
memcpy(data, (const char *)tensor->data + offset, size);
UNUSED(buffer);
}
GGML_CALL static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
if (ggml_backend_buffer_is_host(src->buffer)) {
memcpy(dst->data, src->data, ggml_nbytes(src));
return true;
}
return false;
UNUSED(buffer);
}
GGML_CALL static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;
memset(ctx->all_data, value, ctx->all_size);
}
static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
/* .get_name = */ ggml_backend_metal_buffer_get_name,
/* .free_buffer = */ ggml_backend_metal_buffer_free_buffer,
/* .get_base = */ ggml_backend_metal_buffer_get_base,
/* .init_tensor = */ NULL,
/* .set_tensor = */ ggml_backend_metal_buffer_set_tensor,
/* .get_tensor = */ ggml_backend_metal_buffer_get_tensor,
/* .cpy_tensor = */ ggml_backend_metal_buffer_cpy_tensor,
/* .clear = */ ggml_backend_metal_buffer_clear,
/* .reset = */ NULL,
};
// default buffer type
GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
return "Metal";
UNUSED(buft);
}
static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
if (@available(macOS 10.12, iOS 16.0, *)) {
GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
device.currentAllocatedSize / 1024.0 / 1024.0,
device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
} else {
GGML_METAL_LOG_INFO("\n");
}
} else {
GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0);
}
#endif
UNUSED(device);
}
GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));
const size_t size_page = sysconf(_SC_PAGESIZE);
size_t size_aligned = size;
if ((size_aligned % size_page) != 0) {
size_aligned += (size_page - (size_aligned % size_page));
}
id<MTLDevice> device = ggml_backend_metal_get_device();
ctx->all_data = ggml_metal_host_malloc(size_aligned);
ctx->all_size = size_aligned;
ctx->owned = true;
ctx->n_buffers = 1;
ctx->buffers[0].data = ctx->all_data;
ctx->buffers[0].size = size;
ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
length:size_aligned
options:MTLResourceStorageModeShared
deallocator:nil];
if (ctx->buffers[0].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
free(ctx);
ggml_backend_metal_free_device();
return NULL;
}
GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
ggml_backend_metal_log_allocated_size(device);
return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size);
}
GGML_CALL static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
return 32;
UNUSED(buft);
}
GGML_CALL static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
id<MTLDevice> device = ggml_backend_metal_get_device();
size_t max_size = device.maxBufferLength;
ggml_backend_metal_free_device();
return max_size;
UNUSED(buft);
}
GGML_CALL static bool ggml_backend_metal_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
return ggml_backend_is_metal(backend) || ggml_backend_is_cpu(backend);
UNUSED(buft);
}
GGML_CALL static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
return true;
UNUSED(buft);
}
GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
/* .iface = */ {
/* .get_name = */ ggml_backend_metal_buffer_type_get_name,
/* .alloc_buffer = */ ggml_backend_metal_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_metal_buffer_type_get_alignment,
/* .get_max_size = */ ggml_backend_metal_buffer_type_get_max_size,
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
/* .supports_backend = */ ggml_backend_metal_buffer_type_supports_backend,
/* .is_host = */ ggml_backend_metal_buffer_type_is_host,
},
/* .context = */ NULL,
};
return &ggml_backend_buffer_type_metal;
}
// buffer from ptr
GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) {
struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));
ctx->all_data = data;
ctx->all_size = size;
ctx->owned = false;
ctx->n_buffers = 0;
const size_t size_page = sysconf(_SC_PAGESIZE);
// page-align the data ptr
{
const uintptr_t offs = (uintptr_t) data % size_page;
data = (void *) ((char *) data - offs);
size += offs;
}
size_t size_aligned = size;
if ((size_aligned % size_page) != 0) {
size_aligned += (size_page - (size_aligned % size_page));
}
id<MTLDevice> device = ggml_backend_metal_get_device();
// the buffer fits into the max buffer size allowed by the device
if (size_aligned <= device.maxBufferLength) {
ctx->buffers[ctx->n_buffers].data = data;
ctx->buffers[ctx->n_buffers].size = size;
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
return false;
}
GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
++ctx->n_buffers;
} else {
// this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
// one of the views
const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
const size_t size_step = device.maxBufferLength - size_ovlp;
const size_t size_view = device.maxBufferLength;
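// advance by size_step so that consecutive views of size_view overlap by size_ovlp bytes;
// any tensor of at most max_size bytes is then fully contained in at least one view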
for (size_t i = 0; i < size; i += size_step) {
const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
ctx->buffers[ctx->n_buffers].size = size_step_aligned;
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
return false;
}
GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i);
if (i + size_step < size) {
GGML_METAL_LOG_INFO("\n");
}
++ctx->n_buffers;
}
}
ggml_backend_metal_log_allocated_size(device);
return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size);
}
// backend
GGML_CALL static const char * ggml_backend_metal_name(ggml_backend_t backend) {
return "Metal";
UNUSED(backend);
}
GGML_CALL static void ggml_backend_metal_free(ggml_backend_t backend) {
struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
ggml_metal_free(ctx);
free(backend);
}
GGML_CALL static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggml_backend_t backend) {
return ggml_backend_metal_buffer_type();
UNUSED(backend);
}
GGML_CALL static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;
return ggml_metal_graph_compute(metal_ctx, cgraph);
}
GGML_CALL static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;
return ggml_metal_supports_op(metal_ctx, op);
}
static struct ggml_backend_i ggml_backend_metal_i = {
/* .get_name = */ ggml_backend_metal_name,
/* .free = */ ggml_backend_metal_free,
/* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
/* .set_tensor_async = */ NULL,
/* .get_tensor_async = */ NULL,
/* .cpy_tensor_async = */ NULL,
/* .synchronize = */ NULL,
/* .graph_plan_create = */ NULL,
/* .graph_plan_free = */ NULL,
/* .graph_plan_compute = */ NULL,
/* .graph_compute = */ ggml_backend_metal_graph_compute,
/* .supports_op = */ ggml_backend_metal_supports_op,
/* .offload_op = */ NULL,
/* .event_new = */ NULL,
/* .event_free = */ NULL,
/* .event_record = */ NULL,
/* .event_wait = */ NULL,
/* .event_synchronize = */ NULL,
};
void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
ggml_metal_log_callback = log_callback;
ggml_metal_log_user_data = user_data;
}
static ggml_guid_t ggml_backend_metal_guid(void) {
static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
return &guid;
}
ggml_backend_t ggml_backend_metal_init(void) {
struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS);
if (ctx == NULL) {
return NULL;
}
ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));
*metal_backend = (struct ggml_backend) {
/* .guid = */ ggml_backend_metal_guid(),
/* .interface = */ ggml_backend_metal_i,
/* .context = */ ctx,
};
return metal_backend;
}
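// Typical usage (sketch, assuming the usual ggml-backend API; error handling omitted):
//
//   ggml_backend_t backend = ggml_backend_metal_init();
//   if (backend) {
//       // ... build a ggml_cgraph and call ggml_backend_graph_compute(backend, graph) ...
//       ggml_backend_free(backend);
//   }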
bool ggml_backend_is_metal(ggml_backend_t backend) {
return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
}
void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
GGML_ASSERT(ggml_backend_is_metal(backend));
struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
}
bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
GGML_ASSERT(ggml_backend_is_metal(backend));
struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
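// family N maps to MTLGPUFamilyApple<N> (e.g. ggml_backend_metal_supports_family(backend, 7) checks MTLGPUFamilyApple7)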
return [ctx->device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
}
void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
GGML_ASSERT(ggml_backend_is_metal(backend));
struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
ctx->should_capture_next_compute = true;
}
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data); // silence warning
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data) {
return ggml_backend_metal_init();
GGML_UNUSED(params);
GGML_UNUSED(user_data);
}