ggml-metal.m

#import "ggml-metal.h"
#import "ggml-backend-impl.h"
#import "ggml.h"

#import <Foundation/Foundation.h>
#import <Metal/Metal.h>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#ifdef GGML_METAL_NDEBUG
#define GGML_METAL_LOG_INFO(...)
#define GGML_METAL_LOG_WARN(...)
#define GGML_METAL_LOG_ERROR(...)
#else
#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

struct ggml_metal_kernel {
    id<MTLComputePipelineState> pipeline;
};
enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_ADD,
    GGML_METAL_KERNEL_TYPE_ADD_ROW,
    GGML_METAL_KERNEL_TYPE_MUL,
    GGML_METAL_KERNEL_TYPE_MUL_ROW,
    GGML_METAL_KERNEL_TYPE_DIV,
    GGML_METAL_KERNEL_TYPE_DIV_ROW,
    GGML_METAL_KERNEL_TYPE_SCALE,
    GGML_METAL_KERNEL_TYPE_SCALE_4,
    GGML_METAL_KERNEL_TYPE_TANH,
    GGML_METAL_KERNEL_TYPE_RELU,
    GGML_METAL_KERNEL_TYPE_GELU,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
    GGML_METAL_KERNEL_TYPE_SILU,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX_4,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,
    GGML_METAL_KERNEL_TYPE_RMS_NORM,
    GGML_METAL_KERNEL_TYPE_GROUP_NORM,
    GGML_METAL_KERNEL_TYPE_NORM,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F16,
    GGML_METAL_KERNEL_TYPE_ALIBI_F32,
    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
    GGML_METAL_KERNEL_TYPE_PAD_F32,
    GGML_METAL_KERNEL_TYPE_ARANGE_F32,
    GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
    GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,
    GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F32,
    GGML_METAL_KERNEL_TYPE_CONCAT,
    GGML_METAL_KERNEL_TYPE_SQR,
    GGML_METAL_KERNEL_TYPE_SUM_ROWS,

    GGML_METAL_KERNEL_TYPE_COUNT
};
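
// note: each enum value above indexes into ggml_metal_context::kernels and corresponds 1:1 to a
//       "kernel_<name>" compute function in ggml-metal.metal (see the GGML_METAL_ADD_KERNEL calls below)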

struct ggml_metal_context {
    int n_cb;

    id<MTLDevice> device;
    id<MTLCommandQueue> queue;

    dispatch_queue_t d_queue;

    struct ggml_metal_kernel kernels[GGML_METAL_KERNEL_TYPE_COUNT];

    bool support_simdgroup_reduction;
    bool support_simdgroup_mm;

    bool should_capture_next_compute;
};

// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
// static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end

static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
    fprintf(stderr, "%s", msg);

    UNUSED(level);
    UNUSED(user_data);
}

ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback;
void * ggml_metal_log_user_data = NULL;

GGML_ATTRIBUTE_FORMAT(2, 3)
static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
    if (ggml_metal_log_callback != NULL) {
        va_list args;
        va_start(args, format);

        char buffer[128];
        int len = vsnprintf(buffer, 128, format, args);
        if (len < 128) {
            ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
        } else {
            char * buffer2 = malloc(len + 1);
            va_end(args);
            va_start(args, format);
            vsnprintf(buffer2, len + 1, format, args);
            buffer2[len] = 0;
            ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
            free(buffer2);
        }

        va_end(args);
    }
}
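
// note: messages that fit in the 128-byte stack buffer above are forwarded directly;
//       longer messages are re-formatted into a heap buffer sized from the length returned by vsnprintf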

static void * ggml_metal_host_malloc(size_t n) {
    void * data = NULL;
    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
    if (result != 0) {
        GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
        return NULL;
    }

    return data;
}
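
// note (assumption based on how these allocations are used elsewhere in this backend): the page
//       alignment matters because host memory is typically wrapped into no-copy MTLBuffer objects
//       (newBufferWithBytesNoCopy), which requires page-aligned storage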

static struct ggml_metal_context * ggml_metal_init(int n_cb) {
    GGML_METAL_LOG_INFO("%s: allocating\n", __func__);

#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (id<MTLDevice> device in devices) {
        GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
    }
    [devices release]; // since it was created by a *Copy* C method
#endif

    // Pick and show default Metal device
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);

    // Configure context
    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
    ctx->device = device;
    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
    ctx->queue = [ctx->device newCommandQueue];
    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);

    id<MTLLibrary> metal_library;

    // load library
    //
    // - first check if the library is embedded
    // - then check if the library is in the bundle
    // - if not found, load the source and compile it
    // - if that fails, return NULL
    {
        NSBundle * bundle = nil;
#ifdef SWIFT_PACKAGE
        bundle = SWIFTPM_MODULE_BUNDLE;
#else
        bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
#endif

        NSError * error = nil;

#if GGML_METAL_EMBED_LIBRARY
        const bool try_metallib = false;
#else
        const bool try_metallib = true;
#endif

        NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"];
        if (try_metallib && path_lib != nil) {
            // pre-compiled library found
            NSURL * libURL = [NSURL fileURLWithPath:path_lib];
            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]);

            metal_library = [ctx->device newLibraryWithURL:libURL error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
        } else {
#if GGML_METAL_EMBED_LIBRARY
            GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);

            extern const char ggml_metallib_start[];
            extern const char ggml_metallib_end[];

            NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
#else
            GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);

            NSString * path_source;
            NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];

            GGML_METAL_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil");

            if (path_resource) {
                path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"];
            } else {
                path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
            }

            if (path_source == nil) {
                GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
                path_source = @"ggml-metal.metal";
            }

            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]);

            NSString * src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
#endif // GGML_METAL_EMBED_LIBRARY

            @autoreleasepool {
                // dictionary of preprocessor macros
                NSMutableDictionary * prep = [NSMutableDictionary dictionary];

#ifdef GGML_QKK_64
                prep[@"GGML_QKK_64"] = @(1);
#endif

                MTLCompileOptions * options = [MTLCompileOptions new];
                options.preprocessorMacros = prep;

                //[options setFastMathEnabled:false];

                metal_library = [ctx->device newLibraryWithSource:src options:options error:&error];
                if (error) {
                    GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                    return NULL;
                }
            }
        }
    }
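
    // at this point metal_library holds either the embedded source library (GGML_METAL_EMBED_LIBRARY),
    // a pre-compiled default.metallib from the bundle, or a library compiled from ggml-metal.metal
    // located via GGML_METAL_PATH_RESOURCES, the bundle, or the current working directory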

    // print MTL GPU family:
    GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);

    const NSInteger MTLGPUFamilyMetal3 = 5001;

    // determine max supported GPU family
    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
    {
        for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyMetal3 + 5; i >= MTLGPUFamilyMetal3; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3 + 3, i);
                break;
            }
        }
    }

    ctx->support_simdgroup_reduction = [ctx->device supportsFamily:MTLGPUFamilyApple7];
    ctx->support_simdgroup_reduction |= [ctx->device supportsFamily:MTLGPUFamilyMetal3];

    ctx->support_simdgroup_mm = [ctx->device supportsFamily:MTLGPUFamilyApple7];

    GGML_METAL_LOG_INFO("%s: simdgroup reduction support   = %s\n", __func__, ctx->support_simdgroup_reduction ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: simdgroup matrix mul. support = %s\n", __func__, ctx->support_simdgroup_mm ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory              = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");

    ctx->should_capture_next_compute = false;

#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1e6);
    }
#elif TARGET_OS_OSX
    if (ctx->device.maxTransferRate != 0) {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1e6);
    } else {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__);
    }
#endif

    // load kernels
    {
        NSError * error = nil;

        for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
            ctx->kernels[i].pipeline = nil;
        }

        /*
            GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
                    (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
                    (int) kernel->pipeline.threadExecutionWidth); \
        */
#define GGML_METAL_ADD_KERNEL(e, name, supported) \
        if (supported) { \
            struct ggml_metal_kernel * kernel = &ctx->kernels[e]; \
            id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_"#name]; \
            kernel->pipeline = [ctx->device newComputePipelineStateWithFunction:metal_function error:&error]; \
            [metal_function release]; \
            if (error) { \
                GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
                [metal_library release]; \
                return NULL; \
            } \
        } else { \
            GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \
        }
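
        // for illustration, the first GGML_METAL_ADD_KERNEL invocation below expands roughly to:
        //
        //   if (true) {
        //       struct ggml_metal_kernel * kernel = &ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD];
        //       id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_add"];
        //       kernel->pipeline = [ctx->device newComputePipelineStateWithFunction:metal_function error:&error];
        //       [metal_function release];
        //       if (error) { /* log, release metal_library, return NULL */ }
        //   }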
        // simd_sum and simd_max require MTLGPUFamilyApple7
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M, get_rows_iq1_m, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32, mul_mv_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32, mul_mv_id_iq1_m_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32, mul_mm_iq1_m_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32, mul_mm_id_iq1_m_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32, timestep_embedding_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARANGE_F32, arange_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL, cpy_f32_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
    }

    [metal_library release];

    return ctx;
}
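
// typical lifecycle (illustrative sketch using only the functions defined in this file):
//
//   struct ggml_metal_context * ctx = ggml_metal_init(/*n_cb =*/ 4);
//   if (ctx == NULL) {
//       // initialization failed, e.g. the Metal library could not be loaded or compiled
//   }
//   // ... ggml_metal_graph_compute(ctx, gf); ...
//   ggml_metal_free(ctx);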

static void ggml_metal_free(struct ggml_metal_context * ctx) {
    GGML_METAL_LOG_INFO("%s: deallocating\n", __func__);

    for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
        [ctx->kernels[i].pipeline release];
    }

    [ctx->queue release];
    [ctx->device release];

    dispatch_release(ctx->d_queue);

    free(ctx);
}

// temporarily defined here for compatibility between ggml-backend and the old API

struct ggml_backend_metal_buffer {
    void * data;
    size_t size;

    id<MTLBuffer> metal;
};

struct ggml_backend_metal_buffer_context {
    void * all_data;
    size_t all_size;
    bool owned;

    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
    int n_buffers;
    struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
};

// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_tensor * t, size_t * offs) {
    //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;

    struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context;

    // find the view that contains the tensor fully
    for (int i = 0; i < buf_ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data;

        //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size);
        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //GGML_METAL_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);

            return buf_ctx->buffers[i].metal;
        }
    }

    GGML_METAL_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);

    return nil;
}

static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
        case GGML_OP_CONCAT:
        case GGML_OP_ADD:
        case GGML_OP_ACC:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SUM_ROWS:
            return true;
        case GGML_OP_SOFT_MAX:
        case GGML_OP_RMS_NORM:
        case GGML_OP_GROUP_NORM:
            return ctx->support_simdgroup_reduction;
        case GGML_OP_NORM:
        case GGML_OP_ALIBI:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
            return true;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            return false;
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_ARANGE:
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_ARGSORT:
        case GGML_OP_LEAKY_RELU:
            return true;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            return ctx->support_simdgroup_reduction &&
                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_CONT:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                            case GGML_TYPE_Q8_0:
                            case GGML_TYPE_Q4_0:
                            case GGML_TYPE_Q4_1:
                            case GGML_TYPE_Q5_0:
                            case GGML_TYPE_Q5_1:
                            case GGML_TYPE_IQ4_NL:
                                return true;
                            default:
                                return false;
                        }
                    case GGML_TYPE_F16:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                                return true;
                            default:
                                return false;
                        }
                    default:
                        return false;
                };
            }
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_GET_ROWS:
            {
                return op->ne[3] == 1;
            }
        default:
            return false;
    }
}

static enum ggml_status ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf) {

    @autoreleasepool {
        MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
        edesc.dispatchType = MTLDispatchTypeSerial;

        // create multiple command buffers and enqueue them
        // then, we encode the graph into the command buffers in parallel
        const int n_nodes = gf->n_nodes;
        const int n_cb = ctx->n_cb;
        const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;

        const bool should_capture = ctx->should_capture_next_compute;

        if (should_capture) {
            ctx->should_capture_next_compute = false;

            MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
            descriptor.captureObject = ctx->queue;

            NSError * error = nil;
            if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
                GGML_METAL_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
                GGML_ASSERT(!"capture failed");
            }
        }

        id<MTLCommandBuffer> command_buffer_builder[n_cb];
        for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
            id<MTLCommandBuffer> command_buffer = [ctx->queue commandBufferWithUnretainedReferences];
            command_buffer_builder[cb_idx] = command_buffer;

            // enqueue the command buffers in order to specify their execution order
            [command_buffer enqueue];
        }

        const id<MTLCommandBuffer> * command_buffers = command_buffer_builder;
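
        // each block of up to n_nodes_per_cb nodes is encoded on its own command buffer;
        // dispatch_apply runs the encoding blocks concurrently on d_queue, while the enqueue
        // order above determines the order in which the command buffers execute on the GPU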
        dispatch_apply(n_cb, ctx->d_queue, ^(size_t iter) {
            const int cb_idx = iter;

            size_t offs_src0 = 0;
            size_t offs_src1 = 0;
            size_t offs_src2 = 0;
            size_t offs_dst = 0;

            id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx];
            id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];

            const int node_start = (cb_idx + 0) * n_nodes_per_cb;
            const int node_end = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);

            for (int i = node_start; i < node_end; ++i) {
                if (i == -1) {
                    [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                    continue;
                }

                //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));

                struct ggml_tensor * src0 = gf->nodes[i]->src[0];
                struct ggml_tensor * src1 = gf->nodes[i]->src[1];
                struct ggml_tensor * src2 = gf->nodes[i]->src[2];
                struct ggml_tensor * dst = gf->nodes[i];

                switch (dst->op) {
                    case GGML_OP_NONE:
                    case GGML_OP_RESHAPE:
                    case GGML_OP_VIEW:
                    case GGML_OP_TRANSPOSE:
                    case GGML_OP_PERMUTE:
                        {
                            // noop -> next node
                        } continue;
                    default:
                        {
                        } break;
                }

                if (!ggml_metal_supports_op(ctx, dst)) {
                    GGML_METAL_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
                    GGML_ASSERT(!"unsupported op");
                }

                if (should_capture) {
                    [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(dst) encoding:NSUTF8StringEncoding]];
                }

                const int64_t ne00 = src0 ? src0->ne[0] : 0;
                const int64_t ne01 = src0 ? src0->ne[1] : 0;
                const int64_t ne02 = src0 ? src0->ne[2] : 0;
                const int64_t ne03 = src0 ? src0->ne[3] : 0;

                const uint64_t nb00 = src0 ? src0->nb[0] : 0;
                const uint64_t nb01 = src0 ? src0->nb[1] : 0;
                const uint64_t nb02 = src0 ? src0->nb[2] : 0;
                const uint64_t nb03 = src0 ? src0->nb[3] : 0;

                const int64_t ne10 = src1 ? src1->ne[0] : 0;
                const int64_t ne11 = src1 ? src1->ne[1] : 0;
                const int64_t ne12 = src1 ? src1->ne[2] : 0;
                const int64_t ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);

                const uint64_t nb10 = src1 ? src1->nb[0] : 0;
                const uint64_t nb11 = src1 ? src1->nb[1] : 0;
                const uint64_t nb12 = src1 ? src1->nb[2] : 0;
                const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);

                const int64_t ne0 = dst ? dst->ne[0] : 0;
                const int64_t ne1 = dst ? dst->ne[1] : 0;
                const int64_t ne2 = dst ? dst->ne[2] : 0;
                const int64_t ne3 = dst ? dst->ne[3] : 0;

                const uint64_t nb0 = dst ? dst->nb[0] : 0;
                const uint64_t nb1 = dst ? dst->nb[1] : 0;
                const uint64_t nb2 = dst ? dst->nb[2] : 0;
                const uint64_t nb3 = dst ? dst->nb[3] : 0;

                const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
                const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
                const enum ggml_type dstt = dst ? dst->type : GGML_TYPE_COUNT;

                id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(src0, &offs_src0) : nil;
                id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(src1, &offs_src1) : nil;
                id<MTLBuffer> id_src2 = src2 ? ggml_metal_get_buffer(src2, &offs_src2) : nil;
                id<MTLBuffer> id_dst = dst ? ggml_metal_get_buffer(dst, &offs_dst) : nil;

                //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op));
                //if (src0) {
                //    GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
                //        ggml_is_contiguous(src0), src0->name);
                //}
                //if (src1) {
                //    GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
                //        ggml_is_contiguous(src1), src1->name);
                //}
                //if (dst) {
                //    GGML_METAL_LOG_INFO("%s: dst - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
                //        dst->name);
                //}

                switch (dst->op) {
                    case GGML_OP_CONCAT:
                        {
                            const int64_t nb = ne00;

                            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONCAT].pipeline;

                            [encoder setComputePipelineState:pipeline];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                            [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
                            [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
                            [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
                            [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
                            [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
                            [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
                            [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
                            [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
                            [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
                            [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
                            [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
                            [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
                            [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
                            [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
                            [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
                            [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
                            [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
                            [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
                            [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
                            [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
                            [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
                            [encoder setBytes:&nb length:sizeof(nb) atIndex:27];

                            const int nth = MIN(1024, ne0);

                            [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
  850. case GGML_OP_ADD:
  851. case GGML_OP_MUL:
  852. case GGML_OP_DIV:
  853. {
  854. const size_t offs = 0;
  855. bool bcast_row = false;
  856. int64_t nb = ne00;
  857. id<MTLComputePipelineState> pipeline = nil;
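// fast path: if src1 is a single contiguous row and both row sizes are multiples of 4,
// the *_ROW kernels broadcast that row over src0 and process one float4 per thread
// (hence nb = ne00/4 below); otherwise fall back to the general broadcasting kernels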
  858. if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) {
  859. GGML_ASSERT(ggml_is_contiguous(src0));
  860. // src1 is a row
  861. GGML_ASSERT(ne11 == 1);
  862. nb = ne00 / 4;
  863. switch (dst->op) {
  864. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW].pipeline; break;
  865. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW].pipeline; break;
  866. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW].pipeline; break;
  867. default: GGML_ASSERT(false);
  868. }
  869. bcast_row = true;
  870. } else {
  871. switch (dst->op) {
  872. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline; break;
  873. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break;
  874. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break;
  875. default: GGML_ASSERT(false);
  876. }
  877. }
  878. [encoder setComputePipelineState:pipeline];
  879. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  880. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  881. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  882. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  883. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  884. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  885. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  886. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  887. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
  888. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
  889. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
  890. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  891. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  892. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  893. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  894. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  895. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  896. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  897. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  898. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  899. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  900. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  901. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  902. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  903. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
  904. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
  905. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
  906. [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
  907. [encoder setBytes:&nb length:sizeof(nb) atIndex:28];
  908. if (bcast_row) {
  909. const int64_t n = ggml_nelements(dst)/4;
  910. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  911. } else {
  912. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
  913. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  914. }
  915. } break;
  916. case GGML_OP_ACC:
  917. {
  918. GGML_ASSERT(src0t == GGML_TYPE_F32);
  919. GGML_ASSERT(src1t == GGML_TYPE_F32);
  920. GGML_ASSERT(dstt == GGML_TYPE_F32);
  921. GGML_ASSERT(ggml_is_contiguous(src0));
  922. GGML_ASSERT(ggml_is_contiguous(src1));
  923. const size_t pnb1 = ((int32_t *) dst->op_params)[0];
  924. const size_t pnb2 = ((int32_t *) dst->op_params)[1];
  925. const size_t pnb3 = ((int32_t *) dst->op_params)[2];
  926. const size_t offs = ((int32_t *) dst->op_params)[3];
  927. const bool inplace = (bool) ((int32_t *) dst->op_params)[4];
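// ggml_acc accumulates src1 into a strided view of src0 described by op_params:
// pnb1/pnb2/pnb3 are the view strides and offs its byte offset; when the op is not
// in-place, src0 is first copied into dst and the ADD kernel below adds src1 into that view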
  928. if (!inplace) {
929. // run a separate kernel to cpy src->dst
  930. // not sure how to avoid this
  931. // TODO: make a simpler cpy_bytes kernel
  932. const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;
  933. [encoder setComputePipelineState:pipeline];
  934. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  935. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  936. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  937. [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
  938. [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
  939. [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
  940. [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
  941. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
  942. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
  943. [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
  944. [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
  945. [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
  946. [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
  947. [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
  948. [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
  949. [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
  950. [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
  951. [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
  952. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
  953. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  954. }
  955. const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;
  956. [encoder setComputePipelineState:pipeline];
  957. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  958. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  959. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  960. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  961. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  962. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  963. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  964. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  965. [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:8];
  966. [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:9];
  967. [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:10];
  968. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  969. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  970. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  971. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  972. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  973. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  974. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  975. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  976. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  977. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  978. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  979. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  980. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  981. [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:24];
  982. [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:25];
  983. [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:26];
  984. [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
  985. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);
  986. [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  987. } break;
  988. case GGML_OP_SCALE:
  989. {
  990. GGML_ASSERT(ggml_is_contiguous(src0));
  991. float scale;
  992. memcpy(&scale, dst->op_params, sizeof(scale));
  993. int64_t n = ggml_nelements(dst);
  994. id<MTLComputePipelineState> pipeline = nil;
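// prefer the vectorized kernel when the element count is a multiple of 4:
// SCALE_4 processes one float4 per thread, which is why n is divided by 4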
  995. if (n % 4 == 0) {
  996. n /= 4;
  997. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE_4].pipeline;
  998. } else {
  999. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE].pipeline;
  1000. }
  1001. [encoder setComputePipelineState:pipeline];
  1002. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1003. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1004. [encoder setBytes:&scale length:sizeof(scale) atIndex:2];
  1005. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1006. } break;
  1007. case GGML_OP_UNARY:
  1008. switch (ggml_get_unary_op(gf->nodes[i])) {
  1009. case GGML_UNARY_OP_TANH:
  1010. {
  1011. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TANH].pipeline;
  1012. [encoder setComputePipelineState:pipeline];
  1013. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1014. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1015. const int64_t n = ggml_nelements(dst);
  1016. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1017. } break;
  1018. case GGML_UNARY_OP_RELU:
  1019. {
  1020. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RELU].pipeline;
  1021. [encoder setComputePipelineState:pipeline];
  1022. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1023. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1024. const int64_t n = ggml_nelements(dst);
  1025. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1026. } break;
  1027. case GGML_UNARY_OP_GELU:
  1028. {
  1029. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU].pipeline;
  1030. [encoder setComputePipelineState:pipeline];
  1031. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1032. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1033. const int64_t n = ggml_nelements(dst);
  1034. GGML_ASSERT(n % 4 == 0);
  1035. [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1036. } break;
  1037. case GGML_UNARY_OP_GELU_QUICK:
  1038. {
  1039. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK].pipeline;
  1040. [encoder setComputePipelineState:pipeline];
  1041. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1042. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1043. const int64_t n = ggml_nelements(dst);
  1044. GGML_ASSERT(n % 4 == 0);
  1045. [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1046. } break;
  1047. case GGML_UNARY_OP_SILU:
  1048. {
  1049. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU].pipeline;
  1050. [encoder setComputePipelineState:pipeline];
  1051. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1052. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1053. const int64_t n = ggml_nelements(dst);
  1054. GGML_ASSERT(n % 4 == 0);
  1055. [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1056. } break;
  1057. default:
  1058. {
  1059. GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
  1060. GGML_ASSERT(false);
  1061. }
  1062. } break;
  1063. case GGML_OP_SQR:
  1064. {
  1065. GGML_ASSERT(ggml_is_contiguous(src0));
  1066. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQR].pipeline;
  1067. [encoder setComputePipelineState:pipeline];
  1068. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1069. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1070. const int64_t n = ggml_nelements(dst);
  1071. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1072. } break;
  1073. case GGML_OP_SUM_ROWS:
  1074. {
  1075. GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
  1076. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;
  1077. [encoder setComputePipelineState:pipeline];
  1078. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1079. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1080. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
  1081. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
  1082. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
  1083. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
  1084. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
  1085. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
  1086. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
  1087. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
  1088. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
  1089. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11];
  1090. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
  1091. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
  1092. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
  1093. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
  1094. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
  1095. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17];
  1096. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:18];
  1097. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:19];
  1098. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:20];
  1099. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:21];
  1100. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:22];
  1101. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:23];
  1102. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:24];
  1103. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:25];
  1104. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1105. } break;
  1106. case GGML_OP_SOFT_MAX:
  1107. {
  1108. int nth = 32; // SIMD width
  1109. id<MTLComputePipelineState> pipeline = nil;
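// pick a power-of-two threadgroup size that covers one row: the SOFT_MAX_4 kernel
// reads float4, so it needs at most ne00/4 threads (capped at 256); the scalar
// kernel uses up to 1024 threads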
  1110. if (ne00%4 == 0) {
  1111. while (nth < ne00/4 && nth < 256) {
  1112. nth *= 2;
  1113. }
  1114. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline;
  1115. } else {
  1116. while (nth < ne00 && nth < 1024) {
  1117. nth *= 2;
  1118. }
  1119. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline;
  1120. }
  1121. float scale;
  1122. float max_bias;
  1123. memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(scale));
  1124. memcpy(&max_bias, ((int32_t *) dst->op_params) + 1, sizeof(max_bias));
  1125. const int64_t nrows_x = ggml_nrows(src0);
  1126. const int64_t nrows_y = src0->ne[1];
  1127. const uint32_t n_head_kv = nrows_x/nrows_y;
  1128. const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
  1129. const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
  1130. const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
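// m0/m1 are the ALiBi slope bases, following the usual ALiBi schedule: heads below
// n_head_log2 use slopes m0^(h+1), the remaining heads use m1^(2*(h - n_head_log2) + 1).
// illustrative example: max_bias = 8.0 with 32 heads gives n_head_log2 = 32,
// m0 = 2^(-8/32) ~= 0.84 and m1 = 2^(-4/32) ~= 0.92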
  1131. [encoder setComputePipelineState:pipeline];
  1132. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1133. if (id_src1) {
  1134. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1135. } else {
  1136. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
  1137. }
  1138. if (id_src2) {
  1139. [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
  1140. } else {
  1141. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
  1142. }
  1143. [encoder setBuffer:id_dst offset:offs_dst atIndex:3];
  1144. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
  1145. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
  1146. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
  1147. [encoder setBytes:&scale length:sizeof(scale) atIndex:7];
  1148. [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
  1149. [encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
  1150. [encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
  1151. [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
  1152. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
  1153. [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  1154. } break;
  1155. case GGML_OP_DIAG_MASK_INF:
  1156. {
  1157. const int n_past = ((int32_t *)(dst->op_params))[0];
  1158. id<MTLComputePipelineState> pipeline = nil;
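// n_past shifts the diagonal of the causal mask; the _8 variant masks 8 values per
// thread, which is why it is dispatched on a flat ne00*ne01*ne02/8 grid below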
  1159. if (ne00%8 == 0) {
  1160. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8].pipeline;
  1161. } else {
  1162. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline;
  1163. }
  1164. [encoder setComputePipelineState:pipeline];
  1165. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1166. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1167. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
  1168. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
  1169. [encoder setBytes:&n_past length:sizeof(int) atIndex:4];
  1170. if (ne00%8 == 0) {
  1171. [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1172. }
  1173. else {
  1174. [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  1175. }
  1176. } break;
  1177. case GGML_OP_MUL_MAT:
  1178. {
  1179. GGML_ASSERT(ne00 == ne10);
  1180. // TODO: assert that dim2 and dim3 are contiguous
  1181. GGML_ASSERT(ne12 % ne02 == 0);
  1182. GGML_ASSERT(ne13 % ne03 == 0);
  1183. const uint r2 = ne12/ne02;
  1184. const uint r3 = ne13/ne03;
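// r2/r3 are the broadcast factors of src1 over src0 along dims 2 and 3
// (e.g. grouped-query attention, where src0 has fewer heads than src1)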
  1185. // find the break-even point where the matrix-matrix kernel becomes more efficient compared
  1186. // to the matrix-vector kernel
  1187. int ne11_mm_min = 1;
  1188. #if 0
  1189. // the numbers below are measured on M2 Ultra for 7B and 13B models
  1190. // these numbers do not translate to other devices or model sizes
  1191. // TODO: need to find a better approach
  1192. if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
  1193. switch (src0t) {
  1194. case GGML_TYPE_F16: ne11_mm_min = 2; break;
  1195. case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
  1196. case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
  1197. case GGML_TYPE_Q3_K: ne11_mm_min = 7; break;
  1198. case GGML_TYPE_Q4_0:
  1199. case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
  1200. case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
  1201. case GGML_TYPE_Q5_0: // not tested yet
  1202. case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
  1203. case GGML_TYPE_Q5_K: ne11_mm_min = 7; break;
  1204. case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
  1205. default: ne11_mm_min = 1; break;
  1206. }
  1207. }
  1208. #endif
  1209. // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
1210. // AMD GPUs and older A-series chips fall back to the matrix-vector multiplication kernel
  1211. if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
  1212. !ggml_is_transposed(src0) &&
  1213. !ggml_is_transposed(src1) &&
  1214. src1t == GGML_TYPE_F32 &&
  1215. ne00 % 32 == 0 && ne00 >= 64 &&
  1216. (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
  1217. //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1218. // some Metal matrix data types require aligned pointers
  1219. // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
  1220. switch (src0->type) {
  1221. case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
  1222. case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
  1223. default: break;
  1224. }
  1225. id<MTLComputePipelineState> pipeline = nil;
  1226. switch (src0->type) {
  1227. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32 ].pipeline; break;
  1228. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32 ].pipeline; break;
  1229. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32 ].pipeline; break;
  1230. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32 ].pipeline; break;
  1231. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32 ].pipeline; break;
  1232. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32 ].pipeline; break;
  1233. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32 ].pipeline; break;
  1234. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32 ].pipeline; break;
  1235. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32 ].pipeline; break;
  1236. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32 ].pipeline; break;
  1237. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32 ].pipeline; break;
  1238. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32 ].pipeline; break;
  1239. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break;
  1240. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break;
  1241. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break;
  1242. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32 ].pipeline; break;
  1243. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32 ].pipeline; break;
  1244. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32 ].pipeline; break;
  1245. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_M_F32 ].pipeline; break;
  1246. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
  1247. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
  1248. default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
  1249. }
  1250. [encoder setComputePipelineState:pipeline];
  1251. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1252. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1253. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1254. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  1255. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
  1256. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
  1257. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
  1258. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
  1259. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:8];
  1260. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:9];
  1261. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:10];
  1262. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:11];
  1263. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:12];
  1264. [encoder setBytes:&r2 length:sizeof(r2) atIndex:13];
  1265. [encoder setBytes:&r3 length:sizeof(r3) atIndex:14];
  1266. [encoder setThreadgroupMemoryLength:8192 atIndex:0];
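// each threadgroup (128 threads, 8 kB of threadgroup memory) computes a 32x64 tile
// of the result: 32 src1 rows by 64 src0 rows, with one grid slice per ne12*ne13 batch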
  1267. [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
  1268. } else {
  1269. int nth0 = 32;
  1270. int nth1 = 1;
  1271. int nrows = 1;
  1272. //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1273. id<MTLComputePipelineState> pipeline = nil;
  1274. // use custom matrix x vector kernel
  1275. switch (src0t) {
  1276. case GGML_TYPE_F32:
  1277. {
  1278. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1279. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline;
  1280. nrows = 4;
  1281. } break;
  1282. case GGML_TYPE_F16:
  1283. {
  1284. nth0 = 32;
  1285. nth1 = 1;
  1286. if (src1t == GGML_TYPE_F32) {
  1287. if (ne11 * ne12 < 4) {
  1288. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline;
  1289. } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
  1290. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline;
  1291. nrows = ne11;
  1292. } else {
  1293. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline;
  1294. nrows = 4;
  1295. }
  1296. } else {
  1297. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline;
  1298. nrows = 4;
  1299. }
  1300. } break;
  1301. case GGML_TYPE_Q4_0:
  1302. {
  1303. nth0 = 8;
  1304. nth1 = 8;
  1305. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline;
  1306. } break;
  1307. case GGML_TYPE_Q4_1:
  1308. {
  1309. nth0 = 8;
  1310. nth1 = 8;
  1311. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline;
  1312. } break;
  1313. case GGML_TYPE_Q5_0:
  1314. {
  1315. nth0 = 8;
  1316. nth1 = 8;
  1317. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline;
  1318. } break;
  1319. case GGML_TYPE_Q5_1:
  1320. {
  1321. nth0 = 8;
  1322. nth1 = 8;
  1323. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline;
  1324. } break;
  1325. case GGML_TYPE_Q8_0:
  1326. {
  1327. nth0 = 8;
  1328. nth1 = 8;
  1329. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline;
  1330. } break;
  1331. case GGML_TYPE_Q2_K:
  1332. {
  1333. nth0 = 2;
  1334. nth1 = 32;
  1335. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline;
  1336. } break;
  1337. case GGML_TYPE_Q3_K:
  1338. {
  1339. nth0 = 2;
  1340. nth1 = 32;
  1341. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline;
  1342. } break;
  1343. case GGML_TYPE_Q4_K:
  1344. {
  1345. nth0 = 4; //1;
  1346. nth1 = 8; //32;
  1347. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline;
  1348. } break;
  1349. case GGML_TYPE_Q5_K:
  1350. {
  1351. nth0 = 2;
  1352. nth1 = 32;
  1353. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline;
  1354. } break;
  1355. case GGML_TYPE_Q6_K:
  1356. {
  1357. nth0 = 2;
  1358. nth1 = 32;
  1359. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline;
  1360. } break;
  1361. case GGML_TYPE_IQ2_XXS:
  1362. {
  1363. nth0 = 4;
  1364. nth1 = 16;
  1365. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline;
  1366. } break;
  1367. case GGML_TYPE_IQ2_XS:
  1368. {
  1369. nth0 = 4;
  1370. nth1 = 16;
  1371. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline;
  1372. } break;
  1373. case GGML_TYPE_IQ3_XXS:
  1374. {
  1375. nth0 = 4;
  1376. nth1 = 16;
  1377. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline;
  1378. } break;
  1379. case GGML_TYPE_IQ3_S:
  1380. {
  1381. nth0 = 4;
  1382. nth1 = 16;
  1383. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline;
  1384. } break;
  1385. case GGML_TYPE_IQ2_S:
  1386. {
  1387. nth0 = 4;
  1388. nth1 = 16;
  1389. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline;
  1390. } break;
  1391. case GGML_TYPE_IQ1_S:
  1392. {
  1393. nth0 = 4;
  1394. nth1 = 16;
  1395. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline;
  1396. } break;
  1397. case GGML_TYPE_IQ1_M:
  1398. {
  1399. nth0 = 4;
  1400. nth1 = 16;
  1401. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_M_F32].pipeline;
  1402. } break;
  1403. case GGML_TYPE_IQ4_NL:
  1404. {
  1405. nth0 = 4;
  1406. nth1 = 16;
  1407. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline;
  1408. } break;
  1409. case GGML_TYPE_IQ4_XS:
  1410. {
  1411. nth0 = 4;
  1412. nth1 = 16;
  1413. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline;
  1414. } break;
  1415. default:
  1416. {
1417. GGML_METAL_LOG_ERROR("unsupported src0 type %d\n", (int)src0t);
  1418. GGML_ASSERT(false && "not implemented");
  1419. }
  1420. };
  1421. if (ggml_is_quantized(src0t)) {
  1422. GGML_ASSERT(ne00 >= nth0*nth1);
  1423. }
  1424. [encoder setComputePipelineState:pipeline];
  1425. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1426. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1427. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1428. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  1429. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  1430. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  1431. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
  1432. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
  1433. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
  1434. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
  1435. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
  1436. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
  1437. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
  1438. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
  1439. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
  1440. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
  1441. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
  1442. [encoder setBytes:&r2 length:sizeof(r2) atIndex:17];
  1443. [encoder setBytes:&r3 length:sizeof(r3) atIndex:18];
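// the grid depends on how many src0 rows each mat-vec kernel reduces per threadgroup:
// 8 rows for the legacy quants, Q2_K and the IQ1/IQ2/IQ3 types, 4 for Q3_K/Q4_K/Q5_K and
// the IQ4 types, 2 for Q6_K, while the F32/F16 kernels process `nrows` src1 rows at a time;
// the IQ2/IQ3 kernels also reserve threadgroup memory (presumably for their codebook tables)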
  1444. if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 ||
  1445. src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K ||
  1446. src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S) {
  1447. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1448. }
  1449. else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
  1450. const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
  1451. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1452. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1453. }
  1454. else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) {
  1455. const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
  1456. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1457. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1458. }
  1459. else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) {
  1460. const int mem_size = 32*sizeof(float);
  1461. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1462. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1463. }
  1464. else if (src0t == GGML_TYPE_Q4_K) {
  1465. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1466. }
  1467. else if (src0t == GGML_TYPE_Q3_K) {
  1468. #ifdef GGML_QKK_64
  1469. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1470. #else
  1471. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1472. #endif
  1473. }
  1474. else if (src0t == GGML_TYPE_Q5_K) {
  1475. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1476. }
  1477. else if (src0t == GGML_TYPE_Q6_K) {
  1478. [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1479. } else {
  1480. const int64_t ny = (ne11 + nrows - 1)/nrows;
  1481. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1482. }
  1483. }
  1484. } break;
  1485. case GGML_OP_MUL_MAT_ID:
  1486. {
  1487. //GGML_ASSERT(ne00 == ne10);
  1488. //GGML_ASSERT(ne03 == ne13);
  1489. GGML_ASSERT(src0t == GGML_TYPE_I32);
  1490. const int n_as = ((int32_t *) dst->op_params)[1];
  1491. // TODO: make this more general
  1492. GGML_ASSERT(n_as <= 8);
  1493. // max size of the src1ids array in the kernel shared buffer
  1494. GGML_ASSERT(ne11 <= 4096);
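// MUL_MAT_ID layout: src0 holds the expert ids (i32), src1 the activations, and the
// expert weight matrices are dst->src[2 .. 2 + n_as); src2 (the first expert) provides
// the shape and type used to select the kernel below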
  1495. const int64_t ne20 = src2 ? src2->ne[0] : 0;
  1496. const int64_t ne21 = src2 ? src2->ne[1] : 0;
  1497. const int64_t ne22 = src2 ? src2->ne[2] : 0;
  1498. const int64_t ne23 = src2 ? src2->ne[3] : 0; GGML_UNUSED(ne23);
  1499. const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20);
  1500. const uint64_t nb21 = src2 ? src2->nb[1] : 0;
  1501. const uint64_t nb22 = src2 ? src2->nb[2] : 0;
  1502. const uint64_t nb23 = src2 ? src2->nb[3] : 0; GGML_UNUSED(nb23);
  1503. const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);
  1504. GGML_ASSERT(!ggml_is_transposed(src2));
  1505. GGML_ASSERT(!ggml_is_transposed(src1));
  1506. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1507. const uint r2 = ne12/ne22;
  1508. const uint r3 = ne13/ne23;
  1509. // find the break-even point where the matrix-matrix kernel becomes more efficient compared
  1510. // to the matrix-vector kernel
  1511. int ne11_mm_min = n_as;
  1512. const int idx = ((int32_t *) dst->op_params)[0];
  1513. // batch size
  1514. GGML_ASSERT(ne01 == ne11);
  1515. // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
1516. // AMD GPUs and older A-series chips fall back to the matrix-vector multiplication kernel
  1517. // !!!
  1518. // TODO: for now, always use mat-vec kernels until we figure out how to improve the
  1519. // indirect matrix multiplication
  1520. // !!!
  1521. if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
  1522. ne20 % 32 == 0 && ne20 >= 64 &&
  1523. ne11 > ne11_mm_min) {
  1524. // some Metal matrix data types require aligned pointers
  1525. // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
  1526. switch (src0->type) {
  1527. case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
  1528. case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
  1529. default: break;
  1530. }
  1531. id<MTLComputePipelineState> pipeline = nil;
  1532. switch (src2->type) {
  1533. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32 ].pipeline; break;
  1534. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32 ].pipeline; break;
  1535. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32 ].pipeline; break;
  1536. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32 ].pipeline; break;
  1537. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32 ].pipeline; break;
  1538. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32 ].pipeline; break;
  1539. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32 ].pipeline; break;
  1540. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32 ].pipeline; break;
  1541. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32 ].pipeline; break;
  1542. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32 ].pipeline; break;
  1543. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32 ].pipeline; break;
  1544. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32 ].pipeline; break;
  1545. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32].pipeline; break;
  1546. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32 ].pipeline; break;
  1547. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32].pipeline; break;
  1548. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32 ].pipeline; break;
  1549. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32 ].pipeline; break;
  1550. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32 ].pipeline; break;
  1551. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_M_F32 ].pipeline; break;
  1552. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32 ].pipeline; break;
  1553. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
  1554. default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
  1555. }
  1556. [encoder setComputePipelineState:pipeline];
  1557. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1558. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1559. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1560. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
  1561. [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
  1562. [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:5];
  1563. [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:6];
  1564. [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:7];
  1565. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:8];
  1566. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:9];
  1567. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
  1568. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
  1569. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
  1570. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
  1571. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14];
  1572. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
  1573. [encoder setBytes:&r2 length:sizeof(r2) atIndex:16];
  1574. [encoder setBytes:&r3 length:sizeof(r3) atIndex:17];
  1575. [encoder setBytes:&idx length:sizeof(idx) atIndex:18];
  1576. // TODO: how to make this an array? read Metal docs
  1577. for (int j = 0; j < 8; ++j) {
  1578. // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
  1579. struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];
  1580. size_t offs_src_cur = 0;
  1581. id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);
  1582. [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:19 + j];
  1583. }
  1584. [encoder setThreadgroupMemoryLength:GGML_PAD(8192 + 2*ne11, 16) atIndex:0];
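// same 32x64 tiling as the plain mat-mat kernel above; the extra 2*ne11 bytes hold the
// src1ids array mentioned earlier (2 bytes per row id, hence the ne11 <= 4096 limit),
// and the grid has one slice per (expert, batch) pair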
  1585. [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne21 + 63)/64, n_as*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
  1586. } else {
  1587. int nth0 = 32;
  1588. int nth1 = 1;
  1589. int nrows = 1;
  1590. //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
  1591. id<MTLComputePipelineState> pipeline = nil;
  1592. // use custom matrix x vector kernel
  1593. switch (src2t) {
  1594. case GGML_TYPE_F32:
  1595. {
  1596. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1597. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32].pipeline;
  1598. } break;
  1599. case GGML_TYPE_F16:
  1600. {
  1601. GGML_ASSERT(src1t == GGML_TYPE_F32);
  1602. nth0 = 32;
  1603. nth1 = 1;
  1604. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32].pipeline;
  1605. } break;
  1606. case GGML_TYPE_Q4_0:
  1607. {
  1608. nth0 = 8;
  1609. nth1 = 8;
  1610. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32].pipeline;
  1611. } break;
  1612. case GGML_TYPE_Q4_1:
  1613. {
  1614. nth0 = 8;
  1615. nth1 = 8;
  1616. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32].pipeline;
  1617. } break;
  1618. case GGML_TYPE_Q5_0:
  1619. {
  1620. nth0 = 8;
  1621. nth1 = 8;
  1622. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32].pipeline;
  1623. } break;
  1624. case GGML_TYPE_Q5_1:
  1625. {
  1626. nth0 = 8;
  1627. nth1 = 8;
  1628. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32].pipeline;
  1629. } break;
  1630. case GGML_TYPE_Q8_0:
  1631. {
  1632. nth0 = 8;
  1633. nth1 = 8;
  1634. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline;
  1635. } break;
  1636. case GGML_TYPE_Q2_K:
  1637. {
  1638. nth0 = 2;
  1639. nth1 = 32;
  1640. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32].pipeline;
  1641. } break;
  1642. case GGML_TYPE_Q3_K:
  1643. {
  1644. nth0 = 2;
  1645. nth1 = 32;
  1646. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32].pipeline;
  1647. } break;
  1648. case GGML_TYPE_Q4_K:
  1649. {
  1650. nth0 = 4; //1;
  1651. nth1 = 8; //32;
  1652. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32].pipeline;
  1653. } break;
  1654. case GGML_TYPE_Q5_K:
  1655. {
  1656. nth0 = 2;
  1657. nth1 = 32;
  1658. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32].pipeline;
  1659. } break;
  1660. case GGML_TYPE_Q6_K:
  1661. {
  1662. nth0 = 2;
  1663. nth1 = 32;
  1664. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32].pipeline;
  1665. } break;
  1666. case GGML_TYPE_IQ2_XXS:
  1667. {
  1668. nth0 = 4;
  1669. nth1 = 16;
  1670. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32].pipeline;
  1671. } break;
  1672. case GGML_TYPE_IQ2_XS:
  1673. {
  1674. nth0 = 4;
  1675. nth1 = 16;
  1676. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32].pipeline;
  1677. } break;
  1678. case GGML_TYPE_IQ3_XXS:
  1679. {
  1680. nth0 = 4;
  1681. nth1 = 16;
  1682. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32].pipeline;
  1683. } break;
  1684. case GGML_TYPE_IQ3_S:
  1685. {
  1686. nth0 = 4;
  1687. nth1 = 16;
  1688. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32].pipeline;
  1689. } break;
  1690. case GGML_TYPE_IQ2_S:
  1691. {
  1692. nth0 = 4;
  1693. nth1 = 16;
  1694. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32].pipeline;
  1695. } break;
  1696. case GGML_TYPE_IQ1_S:
  1697. {
  1698. nth0 = 4;
  1699. nth1 = 16;
  1700. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32].pipeline;
  1701. } break;
  1702. case GGML_TYPE_IQ1_M:
  1703. {
  1704. nth0 = 4;
  1705. nth1 = 16;
  1706. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_M_F32].pipeline;
  1707. } break;
  1708. case GGML_TYPE_IQ4_NL:
  1709. {
  1710. nth0 = 4;
  1711. nth1 = 16;
  1712. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
  1713. } break;
  1714. case GGML_TYPE_IQ4_XS:
  1715. {
  1716. nth0 = 4;
  1717. nth1 = 16;
  1718. pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
  1719. } break;
  1720. default:
  1721. {
1722. GGML_METAL_LOG_ERROR("unsupported src2 type %d\n", (int)src2t);
  1723. GGML_ASSERT(false && "not implemented");
  1724. }
  1725. };
  1726. if (ggml_is_quantized(src2t)) {
  1727. GGML_ASSERT(ne20 >= nth0*nth1);
  1728. }
1729. const int64_t _ne1 = 1; // the kernel needs a reference in constant memory
  1730. [encoder setComputePipelineState:pipeline];
  1731. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1732. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1733. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1734. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
  1735. [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
  1736. [encoder setBytes:&ne21 length:sizeof(ne21) atIndex:5];
  1737. [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:6];
  1738. [encoder setBytes:&nb20 length:sizeof(nb20) atIndex:7];
  1739. [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:8];
  1740. [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:9];
  1741. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
  1742. [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:11];
  1743. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
  1744. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
  1745. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
  1746. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
  1747. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
  1748. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:17];
  1749. [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:18];
  1750. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:19];
  1751. [encoder setBytes:&r2 length:sizeof(r2) atIndex:20];
  1752. [encoder setBytes:&r3 length:sizeof(r3) atIndex:21];
  1753. [encoder setBytes:&idx length:sizeof(idx) atIndex:22];
  1754. // TODO: how to make this an array? read Metal docs
  1755. for (int j = 0; j < 8; ++j) {
  1756. // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
  1757. struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];
  1758. size_t offs_src_cur = 0;
  1759. id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);
  1760. [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:23 + j];
  1761. }
  1762. if (src2t == GGML_TYPE_Q4_0 || src2t == GGML_TYPE_Q4_1 || src2t == GGML_TYPE_Q5_0 ||
  1763. src2t == GGML_TYPE_Q5_1 || src2t == GGML_TYPE_Q8_0 || src2t == GGML_TYPE_Q2_K ||
  1764. src2t == GGML_TYPE_IQ1_S || src2t == GGML_TYPE_IQ1_M || src2t == GGML_TYPE_IQ2_S) {
  1765. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1766. }
  1767. else if (src2t == GGML_TYPE_IQ2_XXS || src2t == GGML_TYPE_IQ2_XS) {
  1768. const int mem_size = src2t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
  1769. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1770. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1771. }
  1772. else if (src2t == GGML_TYPE_IQ3_XXS || src2t == GGML_TYPE_IQ3_S) {
  1773. const int mem_size = src2t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
  1774. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1775. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1776. }
  1777. else if (src2t == GGML_TYPE_IQ4_NL || src2t == GGML_TYPE_IQ4_XS) {
  1778. const int mem_size = 32*sizeof(float);
  1779. [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
  1780. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1781. }
  1782. else if (src2t == GGML_TYPE_Q4_K) {
  1783. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1784. }
  1785. else if (src2t == GGML_TYPE_Q3_K) {
  1786. #ifdef GGML_QKK_64
  1787. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1788. #else
  1789. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1790. #endif
  1791. }
  1792. else if (src2t == GGML_TYPE_Q5_K) {
  1793. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1794. }
  1795. else if (src2t == GGML_TYPE_Q6_K) {
  1796. [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1797. } else {
  1798. const int64_t ny = (_ne1 + nrows - 1)/nrows;
  1799. [encoder dispatchThreadgroups:MTLSizeMake(ne21, ny, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
  1800. }
  1801. }
  1802. } break;
  1803. case GGML_OP_GET_ROWS:
  1804. {
  1805. id<MTLComputePipelineState> pipeline = nil;
  1806. switch (src0->type) {
  1807. case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F32 ].pipeline; break;
  1808. case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F16 ].pipeline; break;
  1809. case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0 ].pipeline; break;
  1810. case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1 ].pipeline; break;
  1811. case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0 ].pipeline; break;
  1812. case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1 ].pipeline; break;
  1813. case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0 ].pipeline; break;
  1814. case GGML_TYPE_Q2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K ].pipeline; break;
  1815. case GGML_TYPE_Q3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K ].pipeline; break;
  1816. case GGML_TYPE_Q4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K ].pipeline; break;
  1817. case GGML_TYPE_Q5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K ].pipeline; break;
  1818. case GGML_TYPE_Q6_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K ].pipeline; break;
  1819. case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS].pipeline; break;
  1820. case GGML_TYPE_IQ2_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS ].pipeline; break;
  1821. case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS].pipeline; break;
  1822. case GGML_TYPE_IQ3_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S ].pipeline; break;
  1823. case GGML_TYPE_IQ2_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S ].pipeline; break;
  1824. case GGML_TYPE_IQ1_S: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S ].pipeline; break;
  1825. case GGML_TYPE_IQ1_M: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_M ].pipeline; break;
  1826. case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
  1827. case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
  1828. case GGML_TYPE_I32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_I32 ].pipeline; break;
  1829. default: GGML_ASSERT(false && "not implemented");
  1830. }
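// one threadgroup of 32 threads per (index, src1 row): each gathers the src0 row selected
// by the i32 index in src1, dequantizing it if needed, and writes it to dst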
  1831. [encoder setComputePipelineState:pipeline];
  1832. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1833. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  1834. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  1835. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
  1836. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4];
  1837. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:5];
  1838. [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:6];
  1839. [encoder setBytes:&nb10 length:sizeof( int64_t) atIndex:7];
  1840. [encoder setBytes:&nb11 length:sizeof( int64_t) atIndex:8];
  1841. [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:9];
  1842. [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:10];
  1843. [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
  1844. } break;
  1845. case GGML_OP_RMS_NORM:
  1846. {
  1847. GGML_ASSERT(ne00 % 4 == 0);
  1848. float eps;
  1849. memcpy(&eps, dst->op_params, sizeof(float));
  1850. int nth = 32; // SIMD width
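// each thread accumulates 4 floats, so use the smallest power of two >= ne00/4 (capped
// at 1024), e.g. ne00 = 4096 -> nth = 1024; the 32 floats of threadgroup memory set below
// hold one partial sum per SIMD group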
  1851. while (nth < ne00/4 && nth < 1024) {
  1852. nth *= 2;
  1853. }
  1854. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM].pipeline;
  1855. [encoder setComputePipelineState:pipeline];
  1856. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1857. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1858. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  1859. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
  1860. [encoder setBytes:&eps length:sizeof( float) atIndex:4];
  1861. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
  1862. const int64_t nrows = ggml_nrows(src0);
  1863. [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  1864. } break;
  1865. case GGML_OP_GROUP_NORM:
  1866. {
  1867. GGML_ASSERT(ne00 % 4 == 0);
  1868. //float eps;
  1869. //memcpy(&eps, dst->op_params, sizeof(float));
  1870. const float eps = 1e-6f; // TODO: temporarily hardcoded
  1871. const int32_t n_groups = ((int32_t *) dst->op_params)[0];
  1872. int nth = 32; // SIMD width
  1873. //while (nth < ne00/4 && nth < 1024) {
  1874. // nth *= 2;
  1875. //}
  1876. id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline;
  1877. [encoder setComputePipelineState:pipeline];
  1878. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  1879. [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
  1880. [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
  1881. [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
  1882. [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
  1883. [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:5];
  1884. [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:6];
  1885. [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:7];
  1886. [encoder setBytes:&n_groups length:sizeof( int32_t) atIndex:8];
  1887. [encoder setBytes:&eps length:sizeof( float) atIndex:9];
  1888. [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
  1889. [encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  1890. } break;
                case GGML_OP_NORM:
                    {
                        float eps;
                        memcpy(&eps, dst->op_params, sizeof(float));

                        const int nth = MIN(256, ne00);

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NORM].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                        [encoder setBytes:&eps length:sizeof( float) atIndex:4];
                        [encoder setThreadgroupMemoryLength:GGML_PAD(nth*sizeof(float), 16) atIndex:0];

                        const int64_t nrows = ggml_nrows(src0);

                        [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
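                // ALIBI: adds the linear position bias; m0 and m1 are the slope bases for heads below and above the power-of-two head count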
                case GGML_OP_ALIBI:
                    {
                        GGML_ASSERT((src0t == GGML_TYPE_F32));

                        const int nth = MIN(1024, ne00);

                        //const int n_past = ((int32_t *) dst->op_params)[0];
                        const int n_head = ((int32_t *) dst->op_params)[1];

                        float max_bias;
                        memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

                        const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                        const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
                        const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                        [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
                        [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
                        [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
                        [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
                        [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
                        [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
                        [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
                        [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
                        [encoder setBytes:&m1 length:sizeof( float) atIndex:19];
                        [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
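                // ROPE: rotary position embedding; src1 holds the positions and the remaining parameters come from dst->op_params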
                case GGML_OP_ROPE:
                    {
                        GGML_ASSERT(ne10 == ne02);

                        const int nth = MIN(1024, ne00);

                        const int n_past     = ((int32_t *) dst->op_params)[0];
                        const int n_dims     = ((int32_t *) dst->op_params)[1];
                        const int mode       = ((int32_t *) dst->op_params)[2];
                        // skip 3, n_ctx, used in GLM RoPE, unimplemented in metal
                        const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

                        float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
                        memcpy(&freq_base,   (int32_t *) dst->op_params + 5,  sizeof(float));
                        memcpy(&freq_scale,  (int32_t *) dst->op_params + 6,  sizeof(float));
                        memcpy(&ext_factor,  (int32_t *) dst->op_params + 7,  sizeof(float));
                        memcpy(&attn_factor, (int32_t *) dst->op_params + 8,  sizeof(float));
                        memcpy(&beta_fast,   (int32_t *) dst->op_params + 9,  sizeof(float));
                        memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (src0->type) {
                            case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F32].pipeline; break;
                            case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F16].pipeline; break;
                            default: GGML_ASSERT(false);
                        };

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
                        [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4];
                        [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5];
                        [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6];
                        [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
                        [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
                        [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
                        [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11];
                        [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12];
                        [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13];
                        [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14];
                        [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15];
                        [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17];
                        [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18];
                        [encoder setBytes:&n_past length:sizeof( int) atIndex:19];
                        [encoder setBytes:&n_dims length:sizeof( int) atIndex:20];
                        [encoder setBytes:&mode length:sizeof( int) atIndex:21];
                        [encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:22];
                        [encoder setBytes:&freq_base length:sizeof( float) atIndex:23];
                        [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
                        [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
                        [encoder setBytes:&attn_factor length:sizeof( float) atIndex:26];
                        [encoder setBytes:&beta_fast length:sizeof( float) atIndex:27];
                        [encoder setBytes:&beta_slow length:sizeof( float) atIndex:28];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
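                // IM2COL: unfolds src1 into columns for convolution; kernel dimensions come from src0, stride/padding/dilation from op_params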
                case GGML_OP_IM2COL:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F16);
                        GGML_ASSERT(src1->type == GGML_TYPE_F32);
                        GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);

                        const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
                        const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
                        const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
                        const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
                        const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
                        const int32_t d1 = ((const int32_t *)(dst->op_params))[5];

                        const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;

                        const int32_t N  = src1->ne[is_2D ? 3 : 2];
                        const int32_t IC = src1->ne[is_2D ? 2 : 1];
                        const int32_t IH = is_2D ? src1->ne[1] : 1;
                        const int32_t IW = src1->ne[0];

                        const int32_t KH = is_2D ? src0->ne[1] : 1;
                        const int32_t KW = src0->ne[0];

                        const int32_t OH = is_2D ? dst->ne[2] : 1;
                        const int32_t OW = dst->ne[1];

                        const int32_t CHW = IC * KH * KW;

                        const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4;
                        const int32_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4;

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (dst->type) {
                            case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline; break;
                            case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F16].pipeline; break;
                            default: GGML_ASSERT(false);
                        };

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ofs0 length:sizeof( int32_t) atIndex:2];
                        [encoder setBytes:&ofs1 length:sizeof( int32_t) atIndex:3];
                        [encoder setBytes:&IW length:sizeof( int32_t) atIndex:4];
                        [encoder setBytes:&IH length:sizeof( int32_t) atIndex:5];
                        [encoder setBytes:&CHW length:sizeof( int32_t) atIndex:6];
                        [encoder setBytes:&s0 length:sizeof( int32_t) atIndex:7];
                        [encoder setBytes:&s1 length:sizeof( int32_t) atIndex:8];
                        [encoder setBytes:&p0 length:sizeof( int32_t) atIndex:9];
                        [encoder setBytes:&p1 length:sizeof( int32_t) atIndex:10];
                        [encoder setBytes:&d0 length:sizeof( int32_t) atIndex:11];
                        [encoder setBytes:&d1 length:sizeof( int32_t) atIndex:12];

                        [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)];
                    } break;
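                // UPSCALE: upscales src0 by the integer scale factor sf read from op_params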
                case GGML_OP_UPSCALE:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        const int sf = dst->op_params[0];

                        const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                        [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10];
                        [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11];
                        [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12];
                        [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13];
                        [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14];
                        [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
                        [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
                        [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];
                        [encoder setBytes:&sf length:sizeof(sf) atIndex:18];

                        const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
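                // PAD: writes src0 into the larger dst tensor, zero-padding the remainder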
                case GGML_OP_PAD:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                        [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:10];
                        [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:11];
                        [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:12];
                        [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:13];
                        [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:14];
                        [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
                        [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16];
                        [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
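                // ARANGE: fills dst with an arithmetic sequence defined by start and step from op_params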
                case GGML_OP_ARANGE:
                    {
                        GGML_ASSERT(dst->type == GGML_TYPE_F32);

                        float start;
                        float step;

                        memcpy(&start, ((int32_t *) dst->op_params) + 0, sizeof(float));
                        memcpy(&step,  ((int32_t *) dst->op_params) + 2, sizeof(float));

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARANGE_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:0];
                        [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:1];
                        [encoder setBytes:&start length:sizeof(start) atIndex:2];
                        [encoder setBytes:&step length:sizeof(step) atIndex:3];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(1, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
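                // TIMESTEP_EMBEDDING: sinusoidal timestep embeddings (dim and max_period from op_params), one threadgroup per timestep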
                case GGML_OP_TIMESTEP_EMBEDDING:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        const int dim        = dst->op_params[0];
                        const int max_period = dst->op_params[1];

                        const int half = dim / 2;

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TIMESTEP_EMBEDDING_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:2];
                        [encoder setBytes:&dim length:sizeof(dim) atIndex:3];
                        [encoder setBytes:&max_period length:sizeof(max_period) atIndex:4];

                        const int nth = MIN(1024, half);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne00, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
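                // ARGSORT: sorts each row of src0 and writes the resulting indices to dst; one threadgroup of ne00 threads per row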
                case GGML_OP_ARGSORT:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);
                        GGML_ASSERT( dst->type == GGML_TYPE_I32);

                        const int nrows = ggml_nrows(src0);

                        enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (order) {
                            case GGML_SORT_ORDER_ASC:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC].pipeline;  break;
                            case GGML_SORT_ORDER_DESC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC].pipeline; break;
                            default: GGML_ASSERT(false);
                        };

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];

                        [encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00, 1, 1)];
                    } break;
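                // LEAKY_RELU: element-wise activation with a negative slope read from op_params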
                case GGML_OP_LEAKY_RELU:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        float slope;
                        memcpy(&slope, dst->op_params, sizeof(float));

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&slope length:sizeof(slope) atIndex:2];

                        const int64_t n = ggml_nelements(dst);

                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
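                // DUP/CPY/CONT are all handled by the copy kernels, selected by source and destination type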
                case GGML_OP_DUP:
                case GGML_OP_CPY:
                case GGML_OP_CONT:
                    {
                        GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);

                        int nth = MIN(1024, ne00/ggml_blck_size(src0->type));

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (src0t) {
                            case GGML_TYPE_F32:
                                {
                                    GGML_ASSERT(ne0 % ggml_blck_size(dst->type) == 0);

                                    switch (dstt) {
                                        case GGML_TYPE_F16:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F16].pipeline;    break;
                                        case GGML_TYPE_F32:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;    break;
                                        case GGML_TYPE_Q8_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0].pipeline;   break;
                                        case GGML_TYPE_Q4_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0].pipeline;   break;
                                        case GGML_TYPE_Q4_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1].pipeline;   break;
                                        case GGML_TYPE_Q5_0:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0].pipeline;   break;
                                        case GGML_TYPE_Q5_1:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1].pipeline;   break;
                                        case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_IQ4_NL].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    };
                                } break;
                            case GGML_TYPE_F16:
                                {
                                    switch (dstt) {
                                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F16].pipeline; break;
                                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F32].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    };
                                } break;
                            default: GGML_ASSERT(false && "not implemented");
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                        [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
                        [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
                        [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
                        [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
                        [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
                        [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
                        [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                default:
                    {
                        GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                        GGML_ASSERT(false);
                    }
            }

            if (should_capture) {
                [encoder popDebugGroup];
            }
        }

        [encoder endEncoding];

        [command_buffer commit];
    });
    // Wait for completion and check status of each command buffer
    // needed to detect if the device ran out-of-memory for example (#1881)
    for (int i = 0; i < n_cb; ++i) {
        id<MTLCommandBuffer> command_buffer = command_buffers[i];
        [command_buffer waitUntilCompleted];

        MTLCommandBufferStatus status = [command_buffer status];
        if (status != MTLCommandBufferStatusCompleted) {
            GGML_METAL_LOG_ERROR("%s: command buffer %d failed with status %lu\n", __func__, i, status);
            return GGML_STATUS_FAILED;
        }
    }

    if (should_capture) {
        [[MTLCaptureManager sharedCaptureManager] stopCapture];
    }

    }

    return GGML_STATUS_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////

// backend interface

// default buffer
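// the default MTLDevice is shared between all Metal buffers and backends and is reference-counted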
static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;

static id<MTLDevice> ggml_backend_metal_get_device(void) {
    if (g_backend_device == nil) {
        g_backend_device = MTLCreateSystemDefaultDevice();
    }

    g_backend_device_ref_count++;

    return g_backend_device;
}

static void ggml_backend_metal_free_device(void) {
    assert(g_backend_device_ref_count > 0);

    g_backend_device_ref_count--;

    if (g_backend_device_ref_count == 0) {
        [g_backend_device release];
        g_backend_device = nil;
    }
}
GGML_CALL static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
    return "Metal";

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    for (int i = 0; i < ctx->n_buffers; i++) {
        [ctx->buffers[i].metal release];
    }
    ggml_backend_metal_free_device();

    if (ctx->owned) {
        free(ctx->all_data);
    }

    free(ctx);
}

GGML_CALL static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    return ctx->all_data;
}

GGML_CALL static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    UNUSED(buffer);
}

GGML_CALL static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    memset(ctx->all_data, value, ctx->all_size);
}
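// buffer interface vtable; Metal buffers are host-visible (MTLResourceStorageModeShared), so tensor data is accessed with plain memcpy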
static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
    /* .get_name        = */ ggml_backend_metal_buffer_get_name,
    /* .free_buffer     = */ ggml_backend_metal_buffer_free_buffer,
    /* .get_base        = */ ggml_backend_metal_buffer_get_base,
    /* .init_tensor     = */ NULL,
    /* .set_tensor      = */ ggml_backend_metal_buffer_set_tensor,
    /* .get_tensor      = */ ggml_backend_metal_buffer_get_tensor,
    /* .cpy_tensor      = */ ggml_backend_metal_buffer_cpy_tensor,
    /* .clear           = */ ggml_backend_metal_buffer_clear,
    /* .reset           = */ NULL,
};
// default buffer type

GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "Metal";

    UNUSED(buft);
}

static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
                device.currentAllocatedSize / 1024.0 / 1024.0,
                device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
            GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
        } else {
            GGML_METAL_LOG_INFO("\n");
        }
    } else {
        GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0);
    }
#endif
    UNUSED(device);
}
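// allocates a page-aligned, host-shared allocation and wraps it in a single Metal buffer view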
GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    const size_t size_page = sysconf(_SC_PAGESIZE);

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }

    id<MTLDevice> device = ggml_backend_metal_get_device();

    ctx->all_data = ggml_metal_host_malloc(size_aligned);
    ctx->all_size = size_aligned;
    ctx->owned = true;
    ctx->n_buffers = 1;

    ctx->buffers[0].data = ctx->all_data;
    ctx->buffers[0].size = size;
    ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
                                length:size_aligned
                                options:MTLResourceStorageModeShared
                                deallocator:nil];

    if (ctx->buffers[0].metal == nil) {
        GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
        free(ctx);
        ggml_backend_metal_free_device();
        return NULL;
    }

    GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size);
}
GGML_CALL static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 32;

    UNUSED(buft);
}

GGML_CALL static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    id<MTLDevice> device = ggml_backend_metal_get_device();
    size_t max_size = device.maxBufferLength;
    ggml_backend_metal_free_device();

    return max_size;

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return ggml_backend_is_metal(backend) || ggml_backend_is_cpu(backend);

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    UNUSED(buft);
}

GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_metal_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_metal_buffer_type_get_alignment,
            /* .get_max_size     = */ ggml_backend_metal_buffer_type_get_max_size,
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
            /* .supports_backend = */ ggml_backend_metal_buffer_type_supports_backend,
            /* .is_host          = */ ggml_backend_metal_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_buffer_type_metal;
}

// buffer from ptr
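// wraps an existing host allocation in one or more Metal buffer views without copying;
// allocations larger than maxBufferLength are split into overlapping views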
GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    ctx->all_data = data;
    ctx->all_size = size;
    ctx->owned = false;
    ctx->n_buffers = 0;

    const size_t size_page = sysconf(_SC_PAGESIZE);

    // page-align the data ptr
    {
        const uintptr_t offs = (uintptr_t) data % size_page;
        data  = (void *) ((char *) data - offs);
        size += offs;
    }

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }

    id<MTLDevice> device = ggml_backend_metal_get_device();

    // the buffer fits into the max buffer size allowed by the device
    if (size_aligned <= device.maxBufferLength) {
        ctx->buffers[ctx->n_buffers].data = data;
        ctx->buffers[ctx->n_buffers].size = size;
        ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

        if (ctx->buffers[ctx->n_buffers].metal == nil) {
            GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
            return NULL;
        }

        GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);

        ++ctx->n_buffers;
    } else {
        // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
        // one of the views
        const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
        const size_t size_step = device.maxBufferLength - size_ovlp;
        const size_t size_view = device.maxBufferLength;

        for (size_t i = 0; i < size; i += size_step) {
            const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

            ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
            ctx->buffers[ctx->n_buffers].size = size_step_aligned;
            ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
                return NULL;
            }

            GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i);
            if (i + size_step < size) {
                GGML_METAL_LOG_INFO("\n");
            }

            ++ctx->n_buffers;
        }
    }

    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size);
}
// backend

GGML_CALL static const char * ggml_backend_metal_name(ggml_backend_t backend) {
    return "Metal";

    UNUSED(backend);
}

GGML_CALL static void ggml_backend_metal_free(ggml_backend_t backend) {
    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ggml_metal_free(ctx);
    free(backend);
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_metal_buffer_type();

    UNUSED(backend);
}

GGML_CALL static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_graph_compute(metal_ctx, cgraph);
}

GGML_CALL static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_supports_op(metal_ctx, op);
}

static struct ggml_backend_i ggml_backend_metal_i = {
    /* .get_name                = */ ggml_backend_metal_name,
    /* .free                    = */ ggml_backend_metal_free,
    /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_async        = */ NULL,
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_metal_graph_compute,
    /* .supports_op             = */ ggml_backend_metal_supports_op,
    /* .offload_op              = */ NULL,
    /* .event_new               = */ NULL,
    /* .event_free              = */ NULL,
    /* .event_record            = */ NULL,
    /* .event_wait              = */ NULL,
    /* .event_synchronize       = */ NULL,
};
void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
    ggml_metal_log_callback  = log_callback;
    ggml_metal_log_user_data = user_data;
}

static ggml_guid_t ggml_backend_metal_guid(void) {
    static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
    return &guid;
}

ggml_backend_t ggml_backend_metal_init(void) {
    struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS);

    if (ctx == NULL) {
        return NULL;
    }

    ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));

    *metal_backend = (struct ggml_backend) {
        /* .guid      = */ ggml_backend_metal_guid(),
        /* .interface = */ ggml_backend_metal_i,
        /* .context   = */ ctx,
    };

    return metal_backend;
}

bool ggml_backend_is_metal(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
}

void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
}

bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    return [ctx->device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
}

void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ctx->should_capture_next_compute = true;
}

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data); // silence warning

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data) {
    return ggml_backend_metal_init();

    GGML_UNUSED(params);
    GGML_UNUSED(user_data);
}