ggml-metal.m

#import "ggml-metal.h"

#import "ggml-backend-impl.h"
#import "ggml.h"

#import <Foundation/Foundation.h>

#import <Metal/Metal.h>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#ifdef GGML_METAL_NDEBUG
#define GGML_METAL_LOG_INFO(...)
#define GGML_METAL_LOG_WARN(...)
#define GGML_METAL_LOG_ERROR(...)
#else
#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

struct ggml_metal_kernel {
    id<MTLComputePipelineState> pipeline;
};
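
// index of each compiled compute pipeline in ggml_metal_context.kernels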
enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_ADD,
    GGML_METAL_KERNEL_TYPE_ADD_ROW,
    GGML_METAL_KERNEL_TYPE_MUL,
    GGML_METAL_KERNEL_TYPE_MUL_ROW,
    GGML_METAL_KERNEL_TYPE_DIV,
    GGML_METAL_KERNEL_TYPE_DIV_ROW,
    GGML_METAL_KERNEL_TYPE_SCALE,
    GGML_METAL_KERNEL_TYPE_SCALE_4,
    GGML_METAL_KERNEL_TYPE_TANH,
    GGML_METAL_KERNEL_TYPE_RELU,
    GGML_METAL_KERNEL_TYPE_GELU,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
    GGML_METAL_KERNEL_TYPE_SILU,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX_4,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,
    GGML_METAL_KERNEL_TYPE_RMS_NORM,
    GGML_METAL_KERNEL_TYPE_GROUP_NORM,
    GGML_METAL_KERNEL_TYPE_NORM,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F16,
    GGML_METAL_KERNEL_TYPE_ALIBI_F32,
    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
    GGML_METAL_KERNEL_TYPE_PAD_F32,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
    GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,
  //GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,
  //GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F32,
    GGML_METAL_KERNEL_TYPE_CONCAT,
    GGML_METAL_KERNEL_TYPE_SQR,
    GGML_METAL_KERNEL_TYPE_SUM_ROWS,

    GGML_METAL_KERNEL_TYPE_COUNT
};
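
// per-device backend state: the Metal device, its command queue, the dispatch queue
// used for parallel command-buffer encoding, and the compiled kernel pipelines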
struct ggml_metal_context {
    int n_cb;

    id<MTLDevice>       device;
    id<MTLCommandQueue> queue;

    dispatch_queue_t d_queue;

    struct ggml_metal_kernel kernels[GGML_METAL_KERNEL_TYPE_COUNT];

    bool support_simdgroup_reduction;
    bool support_simdgroup_mm;

    bool should_capture_next_compute;
};

// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
// static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end
static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
    fprintf(stderr, "%s", msg);

    UNUSED(level);
    UNUSED(user_data);
}

ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback;
void * ggml_metal_log_user_data = NULL;

GGML_ATTRIBUTE_FORMAT(2, 3)
static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
    if (ggml_metal_log_callback != NULL) {
        va_list args;
        va_start(args, format);
        char buffer[128];
        int len = vsnprintf(buffer, 128, format, args);
        if (len < 128) {
            ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
        } else {
            char * buffer2 = malloc(len + 1);
            va_end(args);
            va_start(args, format);
            vsnprintf(buffer2, len + 1, format, args);
            buffer2[len] = 0;
            ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
            free(buffer2);
        }
        va_end(args);
    }
}
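
// page-aligned host allocation; Metal buffers created with newBufferWithBytesNoCopy
// require the wrapped pointer to be page-aligned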
static void * ggml_metal_host_malloc(size_t n) {
    void * data = NULL;
    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
    if (result != 0) {
        GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
        return NULL;
    }

    return data;
}
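
// creates the Metal context: picks the default device, loads the pre-compiled
// default.metallib when available (otherwise compiles ggml-metal.metal from source),
// detects GPU capabilities and builds the compute pipelines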
static struct ggml_metal_context * ggml_metal_init(int n_cb) {
    GGML_METAL_LOG_INFO("%s: allocating\n", __func__);

#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (id<MTLDevice> device in devices) {
        GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
    }
    [devices release]; // since it was created by a *Copy* C method
#endif

    // Pick and show default Metal device
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);

    // Configure context
    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
    ctx->device  = device;
    ctx->n_cb    = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
    ctx->queue   = [ctx->device newCommandQueue];
    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);

    id<MTLLibrary> metal_library;

    // load library
    {
        NSBundle * bundle = nil;
#ifdef SWIFT_PACKAGE
        bundle = SWIFTPM_MODULE_BUNDLE;
#else
        bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
#endif
        NSError * error = nil;
        NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"];
        if (libPath != nil) {
            // pre-compiled library found
            NSURL * libURL = [NSURL fileURLWithPath:libPath];
            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]);
            metal_library = [ctx->device newLibraryWithURL:libURL error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
        } else {
#if GGML_METAL_EMBED_LIBRARY
            GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);

            extern const char ggml_metallib_start[];
            extern const char ggml_metallib_end[];

            NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
#else
            GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);

            NSString * sourcePath;
            NSString * ggmlMetalPathResources = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];

            GGML_METAL_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, ggmlMetalPathResources ? [ggmlMetalPathResources UTF8String] : "nil");

            if (ggmlMetalPathResources) {
                sourcePath = [ggmlMetalPathResources stringByAppendingPathComponent:@"ggml-metal.metal"];
            } else {
                sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
            }
            if (sourcePath == nil) {
                GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
                sourcePath = @"ggml-metal.metal";
            }
            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]);
            NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
#endif

            @autoreleasepool {
                // dictionary of preprocessor macros
                NSMutableDictionary * prep = [NSMutableDictionary dictionary];

#ifdef GGML_QKK_64
                prep[@"QK_K"] = @(64);
#endif

                MTLCompileOptions * options = [MTLCompileOptions new];
                options.preprocessorMacros = prep;

                //[options setFastMathEnabled:false];

                metal_library = [ctx->device newLibraryWithSource:src options:options error:&error];
                if (error) {
                    GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                    return NULL;
                }
            }
        }
    }
    // print MTL GPU family:
    GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);

    const NSInteger MTLGPUFamilyMetal3 = 5001;

    // determine max supported GPU family
    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
    {
        for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyMetal3 + 5; i >= MTLGPUFamilyMetal3; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3 + 3, i);
                break;
            }
        }
    }

    ctx->support_simdgroup_reduction  = [ctx->device supportsFamily:MTLGPUFamilyApple7];
    ctx->support_simdgroup_reduction |= [ctx->device supportsFamily:MTLGPUFamilyMetal3];

    ctx->support_simdgroup_mm = [ctx->device supportsFamily:MTLGPUFamilyApple7];

    GGML_METAL_LOG_INFO("%s: simdgroup reduction support = %s\n", __func__, ctx->support_simdgroup_reduction ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: simdgroup matrix mul. support = %s\n", __func__, ctx->support_simdgroup_mm ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");

    ctx->should_capture_next_compute = false;

#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1e6);
    }
#elif TARGET_OS_OSX
    // note: this branch is unreachable, since TARGET_OS_OSX already satisfies the #if condition above
    if (ctx->device.maxTransferRate != 0) {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1e6);
    } else {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__);
    }
#endif
    // load kernels
    {
        NSError * error = nil;

        for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
            ctx->kernels[i].pipeline = nil;
        }

        /*
        GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
                (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
                (int) kernel->pipeline.threadExecutionWidth); \
        */
#define GGML_METAL_ADD_KERNEL(e, name, supported) \
        if (supported) { \
            struct ggml_metal_kernel * kernel = &ctx->kernels[e]; \
            id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_"#name]; \
            kernel->pipeline = [ctx->device newComputePipelineStateWithFunction:metal_function error:&error]; \
            [metal_function release]; \
            if (error) { \
                GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
                [metal_library release]; \
                return NULL; \
            } \
        } else { \
            GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \
        }
        // simd_sum and simd_max require MTLGPUFamilyApple7
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S, get_rows_iq3_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S, get_rows_iq2_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32, mul_mv_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32, mul_mv_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32, mul_mv_id_iq3_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32, mul_mv_id_iq2_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32, mul_mm_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32, mul_mm_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32, mul_mm_id_iq3_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32, mul_mm_id_iq2_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
    }

    [metal_library release];

    return ctx;
}
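
// typical lifecycle (illustrative sketch only; the actual call sites and the n_cb
// value live elsewhere in ggml-backend):
//
//     struct ggml_metal_context * ctx = ggml_metal_init(1); // hypothetical n_cb
//     if (ctx) {
//         // ... ggml_metal_graph_compute(ctx, gf); ...
//         ggml_metal_free(ctx);
//     }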
static void ggml_metal_free(struct ggml_metal_context * ctx) {
    GGML_METAL_LOG_INFO("%s: deallocating\n", __func__);

    for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
        [ctx->kernels[i].pipeline release];
    }

    [ctx->queue release];
    [ctx->device release];

    dispatch_release(ctx->d_queue);

    free(ctx);
}

// temporarily defined here for compatibility between ggml-backend and the old API

struct ggml_backend_metal_buffer {
    void   * data;
    size_t   size;

    id<MTLBuffer> metal;
};

struct ggml_backend_metal_buffer_context {
    void * all_data;
    size_t all_size;
    bool   owned;

    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
    int n_buffers;
    struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
};

// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_tensor * t, size_t * offs) {
    //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;

    struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context;

    // find the view that contains the tensor fully
    for (int i = 0; i < buf_ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data;

        //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size);
        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //GGML_METAL_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);

            return buf_ctx->buffers[i].metal;
        }
    }

    GGML_METAL_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);

    return nil;
}
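
// reports whether the backend can encode the given op; ops that need simdgroup
// reductions or simdgroup matrix multiplies are gated on the capabilities detected
// in ggml_metal_init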
static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
        case GGML_OP_CONCAT:
        case GGML_OP_ADD:
        case GGML_OP_ACC:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SUM_ROWS:
            return true;
        case GGML_OP_SOFT_MAX:
        case GGML_OP_RMS_NORM:
        case GGML_OP_GROUP_NORM:
            return ctx->support_simdgroup_reduction;
        case GGML_OP_NORM:
        case GGML_OP_ALIBI:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
            return true;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            return false;
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_ARGSORT:
        case GGML_OP_LEAKY_RELU:
            return true;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            return ctx->support_simdgroup_reduction &&
                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_CONT:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                            case GGML_TYPE_Q8_0:
                            case GGML_TYPE_Q4_0:
                            case GGML_TYPE_Q4_1:
                                return true;
                            default:
                                return false;
                        }
                    case GGML_TYPE_F16:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                                return true;
                            default:
                                return false;
                        }
                    default:
                        return false;
                }
            }
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_GET_ROWS:
            {
                return op->ne[3] == 1;
            }
        default:
            return false;
    }
}
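
// evaluates a ggml compute graph on the GPU: the nodes are split across n_cb command
// buffers, which are enqueued up front to fix their execution order and then encoded
// concurrently on the d_queue dispatch queue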
static bool ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
               struct ggml_cgraph * gf) {

    @autoreleasepool {

    MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
    edesc.dispatchType = MTLDispatchTypeSerial;

    // create multiple command buffers and enqueue them
    // then, we encode the graph into the command buffers in parallel
    const int n_nodes         = gf->n_nodes;
    const int n_cb            = ctx->n_cb;
    const int n_nodes_per_cb  = (n_nodes + n_cb - 1) / n_cb;

    const bool should_capture = ctx->should_capture_next_compute;
    if (should_capture) {
        ctx->should_capture_next_compute = false;

        MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
        descriptor.captureObject = ctx->queue;

        NSError * error = nil;
        if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
            GGML_METAL_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
            GGML_ASSERT(!"capture failed");
        }
    }

    id<MTLCommandBuffer> command_buffer_builder[n_cb];
    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        id<MTLCommandBuffer> command_buffer = [ctx->queue commandBufferWithUnretainedReferences];
        command_buffer_builder[cb_idx] = command_buffer;

        // enqueue the command buffers in order to specify their execution order
        [command_buffer enqueue];
    }

    const id<MTLCommandBuffer> * command_buffers = command_buffer_builder;

    dispatch_apply(n_cb, ctx->d_queue, ^(size_t iter) {
        const int cb_idx = iter;

        size_t offs_src0 = 0;
        size_t offs_src1 = 0;
        size_t offs_src2 = 0;
        size_t offs_dst  = 0;

        id<MTLCommandBuffer> command_buffer  = command_buffers[cb_idx];
        id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];

        const int node_start = (cb_idx + 0) * n_nodes_per_cb;
        const int node_end   = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);

        for (int i = node_start; i < node_end; ++i) {
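            // i is never -1 here (node_start >= 0), so this memory barrier is effectively dead code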
            if (i == -1) {
                [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                continue;
            }

            //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));

            struct ggml_tensor * src0 = gf->nodes[i]->src[0];
            struct ggml_tensor * src1 = gf->nodes[i]->src[1];
            struct ggml_tensor * src2 = gf->nodes[i]->src[2];
            struct ggml_tensor * dst  = gf->nodes[i];

            switch (dst->op) {
                case GGML_OP_NONE:
                case GGML_OP_RESHAPE:
                case GGML_OP_VIEW:
                case GGML_OP_TRANSPOSE:
                case GGML_OP_PERMUTE:
                    {
                        // noop -> next node
                    } continue;
                default:
                    {
                    } break;
            }

            if (!ggml_metal_supports_op(ctx, dst)) {
                GGML_METAL_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
                GGML_ASSERT(!"unsupported op");
            }

            if (should_capture) {
                [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(dst) encoding:NSUTF8StringEncoding]];
            }

            const int64_t  ne00 = src0 ? src0->ne[0] : 0;
            const int64_t  ne01 = src0 ? src0->ne[1] : 0;
            const int64_t  ne02 = src0 ? src0->ne[2] : 0;
            const int64_t  ne03 = src0 ? src0->ne[3] : 0;

            const uint64_t nb00 = src0 ? src0->nb[0] : 0;
            const uint64_t nb01 = src0 ? src0->nb[1] : 0;
            const uint64_t nb02 = src0 ? src0->nb[2] : 0;
            const uint64_t nb03 = src0 ? src0->nb[3] : 0;

            const int64_t  ne10 = src1 ? src1->ne[0] : 0;
            const int64_t  ne11 = src1 ? src1->ne[1] : 0;
            const int64_t  ne12 = src1 ? src1->ne[2] : 0;
            const int64_t  ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);

            const uint64_t nb10 = src1 ? src1->nb[0] : 0;
            const uint64_t nb11 = src1 ? src1->nb[1] : 0;
            const uint64_t nb12 = src1 ? src1->nb[2] : 0;
            const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);

            const int64_t  ne0 = dst ? dst->ne[0] : 0;
            const int64_t  ne1 = dst ? dst->ne[1] : 0;
            const int64_t  ne2 = dst ? dst->ne[2] : 0;
            const int64_t  ne3 = dst ? dst->ne[3] : 0;

            const uint64_t nb0 = dst ? dst->nb[0] : 0;
            const uint64_t nb1 = dst ? dst->nb[1] : 0;
            const uint64_t nb2 = dst ? dst->nb[2] : 0;
            const uint64_t nb3 = dst ? dst->nb[3] : 0;

            const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
            const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
            const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;

            id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(src0, &offs_src0) : nil;
            id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(src1, &offs_src1) : nil;
            id<MTLBuffer> id_src2 = src2 ? ggml_metal_get_buffer(src2, &offs_src2) : nil;
            id<MTLBuffer> id_dst  = dst  ? ggml_metal_get_buffer(dst,  &offs_dst)  : nil;

            //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op));
            //if (src0) {
            //    GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
            //            ggml_is_contiguous(src0), src0->name);
            //}
            //if (src1) {
            //    GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
            //            ggml_is_contiguous(src1), src1->name);
            //}
            //if (dst) {
            //    GGML_METAL_LOG_INFO("%s: dst  - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
            //            dst->name);
            //}
            switch (dst->op) {
                case GGML_OP_CONCAT:
                    {
                        const int64_t nb = ne00;

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONCAT].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
                        [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
                        [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
                        [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
                        [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
                        [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
                        [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
                        [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
                        [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:19];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:20];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:21];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:22];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:23];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:24];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:25];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:26];
                        [encoder setBytes:&nb   length:sizeof(nb)   atIndex:27];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                case GGML_OP_ADD:
                case GGML_OP_MUL:
                case GGML_OP_DIV:
                    {
                        const size_t offs = 0;

                        bool bcast_row = false;

                        int64_t nb = ne00;

                        id<MTLComputePipelineState> pipeline = nil;
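
                        // fast path: src1 is a contiguous row broadcast across src0; the
                        // *_row kernels process the data in chunks of 4 floats, hence the
                        // ne00 % 4 == 0 requirement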
  827. if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) {
  828. GGML_ASSERT(ggml_is_contiguous(src0));
  829. // src1 is a row
  830. GGML_ASSERT(ne11 == 1);
  831. nb = ne00 / 4;
  832. switch (dst->op) {
  833. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW].pipeline; break;
  834. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW].pipeline; break;
  835. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW].pipeline; break;
  836. default: GGML_ASSERT(false);
  837. }
  838. bcast_row = true;
  839. } else {
  840. switch (dst->op) {
  841. case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline; break;
  842. case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break;
  843. case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break;
  844. default: GGML_ASSERT(false);
  845. }
  846. }
  847. [encoder setComputePipelineState:pipeline];
  848. [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
  849. [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
  850. [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
  851. [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
  852. [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
  853. [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
  854. [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
  855. [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
  856. [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
  857. [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
  858. [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
  859. [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
  860. [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
  861. [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
  862. [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
  863. [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
  864. [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
  865. [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
  866. [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
  867. [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
  868. [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
  869. [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
  870. [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
  871. [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
  872. [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
  873. [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
  874. [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
  875. [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
  876. [encoder setBytes:&nb length:sizeof(nb) atIndex:28];
  877. if (bcast_row) {
  878. const int64_t n = ggml_nelements(dst)/4;
  879. [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
  880. } else {
  881. const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);
  882. [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
  883. }
  884. } break;
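    // GGML_OP_ACC accumulates src1 into a strided view of dst, described by
    // op_params: three byte strides (pnb1..pnb3) and a byte offset (offs).
    // Conceptually (a sketch, not the kernel code):
    //
    //   view(dst, offs, pnb1, pnb2, pnb3) += src1;
    //
    // For the non-inplace case dst must first hold a copy of src0, which is
    // why a separate CPY_F32_F32 pass is encoded before the ADD kernel.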
    case GGML_OP_ACC:
        {
            GGML_ASSERT(src0t == GGML_TYPE_F32);
            GGML_ASSERT(src1t == GGML_TYPE_F32);
            GGML_ASSERT(dstt == GGML_TYPE_F32);

            GGML_ASSERT(ggml_is_contiguous(src0));
            GGML_ASSERT(ggml_is_contiguous(src1));

            const size_t pnb1 = ((int32_t *) dst->op_params)[0];
            const size_t pnb2 = ((int32_t *) dst->op_params)[1];
            const size_t pnb3 = ((int32_t *) dst->op_params)[2];
            const size_t offs = ((int32_t *) dst->op_params)[3];

            const bool inplace = (bool) ((int32_t *) dst->op_params)[4];

            if (!inplace) {
                // run a separate kernel to cpy src->dst
                // not sure how to avoid this
                // TODO: make a simpler cpy_bytes kernel

                const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
                [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
                [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
                [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
                [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
                [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
                [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
                [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];

                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);

                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            }

            const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
            [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
            [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
            [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:8];
            [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:9];
            [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:10];
            [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
            [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
            [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
            [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
            [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
            [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
            [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
            [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
            [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
            [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
            [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
            [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
            [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
            [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:24];
            [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:25];
            [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:26];
            [encoder setBytes:&offs length:sizeof(offs) atIndex:27];

            const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);

            [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
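    // SCALE multiplies every element by a scalar taken from op_params
    // (dst[i] = src0[i]*scale). When the element count is divisible by 4 the
    // SCALE_4 pipeline is used so each thread handles a float4, and n is
    // divided accordingly before dispatch.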
    case GGML_OP_SCALE:
        {
            GGML_ASSERT(ggml_is_contiguous(src0));

            const float scale = *(const float *) dst->op_params;

            int64_t n = ggml_nelements(dst);

            id<MTLComputePipelineState> pipeline = nil;

            if (n % 4 == 0) {
                n /= 4;
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE_4].pipeline;
            } else {
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE].pipeline;
            }

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&scale length:sizeof(scale) atIndex:2];

            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
        } break;
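    // The unary kernels below are elementwise. TANH and RELU run one element
    // per threadgroup, while GELU, GELU_QUICK and SILU operate on float4
    // (hence the n % 4 == 0 asserts and the n/4 grid). For reference, the
    // GELU kernel is assumed to implement the usual tanh approximation:
    //
    //   gelu(x) = 0.5f*x*(1.0f + tanhf(sqrtf(2.0f/M_PI)*(x + 0.044715f*x*x*x)))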
    case GGML_OP_UNARY:
        switch (ggml_get_unary_op(gf->nodes[i])) {
            case GGML_UNARY_OP_TANH:
                {
                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TANH].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

                    const int64_t n = ggml_nelements(dst);

                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } break;
            case GGML_UNARY_OP_RELU:
                {
                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RELU].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

                    const int64_t n = ggml_nelements(dst);

                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } break;
            case GGML_UNARY_OP_GELU:
                {
                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

                    const int64_t n = ggml_nelements(dst);
                    GGML_ASSERT(n % 4 == 0);

                    [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } break;
            case GGML_UNARY_OP_GELU_QUICK:
                {
                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

                    const int64_t n = ggml_nelements(dst);
                    GGML_ASSERT(n % 4 == 0);

                    [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } break;
            case GGML_UNARY_OP_SILU:
                {
                    id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

                    const int64_t n = ggml_nelements(dst);
                    GGML_ASSERT(n % 4 == 0);

                    [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } break;
            default:
                {
                    GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                    GGML_ASSERT(false);
                }
        } break;
    case GGML_OP_SQR:
        {
            GGML_ASSERT(ggml_is_contiguous(src0));

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQR].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];

            const int64_t n = ggml_nelements(dst);

            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
        } break;
    case GGML_OP_SUM_ROWS:
        {
            GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
            [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
            [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
            [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
            [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
            [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
            [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
            [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11];
            [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
            [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
            [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
            [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
            [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
            [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17];
            [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:18];
            [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:19];
            [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:20];
            [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:21];
            [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:22];
            [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:23];
            [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:24];
            [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:25];

            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
        } break;
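    // SOFT_MAX fuses scaling, optional masking (src1) and optional ALiBi-style
    // positional slopes (max_bias > 0) into one kernel. The per-head slope is
    // derived from m0/m1 below; under the standard ALiBi scheme, which this is
    // assumed to follow, head h (0-based) gets:
    //
    //   slope(h) = h < n_head_log2 ? powf(m0, h + 1)
    //                              : powf(m1, 2*(h - n_head_log2) + 1);
    //
    // The unused src1/src2 argument slots are bound to src0 only to keep the
    // kernel argument table fully initialized.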
    case GGML_OP_SOFT_MAX:
        {
            int nth = 32; // SIMD width

            id<MTLComputePipelineState> pipeline = nil;

            if (ne00%4 == 0) {
                while (nth < ne00/4 && nth < 256) {
                    nth *= 2;
                }
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline;
            } else {
                while (nth < ne00 && nth < 1024) {
                    nth *= 2;
                }
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline;
            }

            const float scale    = ((float *) dst->op_params)[0];
            const float max_bias = ((float *) dst->op_params)[1];

            const int64_t nrows_x = ggml_nrows(src0);
            const int64_t nrows_y = src0->ne[1];

            const uint32_t n_head_kv   = nrows_x/nrows_y;
            const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

            const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
            const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            if (id_src1) {
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
            } else {
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
            }
            if (id_src2) {
                [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
            } else {
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
            }
            [encoder setBuffer:id_dst offset:offs_dst atIndex:3];
            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
            [encoder setBytes:&scale length:sizeof(scale) atIndex:7];
            [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
            [encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
            [encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
            [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
            [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

            [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
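    // DIAG_MASK_INF writes -INF above the diagonal shifted by n_past (the
    // causal attention mask). When ne00 is divisible by 8 the _8 variant lets
    // each thread cover 8 consecutive elements, so the grid shrinks by the
    // same factor.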
    case GGML_OP_DIAG_MASK_INF:
        {
            const int n_past = ((int32_t *)(dst->op_params))[0];

            id<MTLComputePipelineState> pipeline = nil;

            if (ne00%8 == 0) {
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8].pipeline;
            } else {
                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline;
            }

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
            [encoder setBytes:&n_past length:sizeof(int) atIndex:4];

            if (ne00%8 == 0) {
                [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
            }
            else {
                [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
            }
        } break;
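    // MUL_MAT picks between two strategies: a simdgroup matrix-matrix kernel
    // (mul_mm) for larger batches on Apple7+ GPUs, and per-type matrix-vector
    // kernels (mul_mv) otherwise. r2/r3 are the broadcast ratios along dims
    // 2/3, e.g. with ne12 = 8 and ne02 = 2 each src0 matrix is reused for 4
    // src1 matrices. Judging by the dispatch below, the mul_mm kernel appears
    // to tile 32 columns of src1 by 64 rows of src0 per threadgroup, which is
    // what the (ne11 + 31)/32 x (ne01 + 63)/64 grid and the 8192-byte
    // threadgroup allocation correspond to.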
    case GGML_OP_MUL_MAT:
        {
            GGML_ASSERT(ne00 == ne10);

            // TODO: assert that dim2 and dim3 are contiguous
            GGML_ASSERT(ne12 % ne02 == 0);
            GGML_ASSERT(ne13 % ne03 == 0);

            const uint r2 = ne12/ne02;
            const uint r3 = ne13/ne03;

            // find the break-even point where the matrix-matrix kernel becomes more efficient compared
            // to the matrix-vector kernel
            int ne11_mm_min = 1;

#if 0
            // the numbers below are measured on M2 Ultra for 7B and 13B models
            // these numbers do not translate to other devices or model sizes
            // TODO: need to find a better approach
            if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
                switch (src0t) {
                    case GGML_TYPE_F16:  ne11_mm_min = 2;  break;
                    case GGML_TYPE_Q8_0: ne11_mm_min = 7;  break;
                    case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
                    case GGML_TYPE_Q3_K: ne11_mm_min = 7;  break;
                    case GGML_TYPE_Q4_0:
                    case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
                    case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
                    case GGML_TYPE_Q5_0:                   // not tested yet
                    case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
                    case GGML_TYPE_Q5_K: ne11_mm_min = 7;  break;
                    case GGML_TYPE_Q6_K: ne11_mm_min = 7;  break;
                    default:             ne11_mm_min = 1;  break;
                }
            }
#endif

            // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
            // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
            if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
                !ggml_is_transposed(src0) &&
                !ggml_is_transposed(src1) &&
                src1t == GGML_TYPE_F32 &&
                ne00 % 32 == 0 && ne00 >= 64 &&
                (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
                //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                id<MTLComputePipelineState> pipeline = nil;

                switch (src0->type) {
                    case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32    ].pipeline; break;
                    case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32    ].pipeline; break;
                    case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32   ].pipeline; break;
                    case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32   ].pipeline; break;
                    case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break;
                    case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break;
                    case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break;
                    case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
                    case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
                    default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
                }

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:8];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:9];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:10];
                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:11];
                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:12];
                [encoder setBytes:&r2 length:sizeof(r2) atIndex:13];
                [encoder setBytes:&r3 length:sizeof(r3) atIndex:14];
                [encoder setThreadgroupMemoryLength:8192 atIndex:0];

                [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
            } else {
                int nth0 = 32;
                int nth1 = 1;
                int nrows = 1;
                //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                id<MTLComputePipelineState> pipeline = nil;

                // use custom matrix x vector kernel
                switch (src0t) {
                    case GGML_TYPE_F32:
                        {
                            GGML_ASSERT(src1t == GGML_TYPE_F32);
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline;
                            nrows = 4;
                        } break;
                    case GGML_TYPE_F16:
                        {
                            nth0 = 32;
                            nth1 = 1;
                            if (src1t == GGML_TYPE_F32) {
                                if (ne11 * ne12 < 4) {
                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline;
                                } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline;
                                    nrows = ne11;
                                } else {
                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline;
                                    nrows = 4;
                                }
                            } else {
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline;
                                nrows = 4;
                            }
                        } break;
                    case GGML_TYPE_Q4_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q4_1:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_1:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q8_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q2_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q3_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q4_K:
                        {
                            nth0 = 4; //1;
                            nth1 = 8; //32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q6_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_XXS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_XS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ3_XXS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ3_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ1_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ4_NL:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ4_XS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline;
                        } break;
                    default:
                        {
                            GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
                            GGML_ASSERT(false && "not implemented");
                        }
                };

                if (ggml_is_quantized(src0t)) {
                    GGML_ASSERT(ne00 >= nth0*nth1);
                }

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:15];
                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:16];
                [encoder setBytes:&r2 length:sizeof(r2) atIndex:17];
                [encoder setBytes:&r3 length:sizeof(r3) atIndex:18];

                if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
                    src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 ||
                    src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ2_S) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
                    const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_IQ3_XXS || src0t == GGML_TYPE_IQ3_S) {
                    const int mem_size = src0t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS) {
                    const int mem_size = 32*sizeof(float);
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_Q4_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                }
                else if (src0t == GGML_TYPE_Q5_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src0t == GGML_TYPE_Q6_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                } else {
                    const int64_t ny = (ne11 + nrows - 1)/nrows;
                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
            }
        } break;
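    // MUL_MAT_ID is the indirect (mixture-of-experts) variant: src0 holds I32
    // row ids selecting which expert matrix in dst->src[2 + e] multiplies each
    // row of src1. op_params carry the expert count (n_as) and the id column
    // (idx). Since the Metal argument table is fixed, all (up to 8) expert
    // buffers are bound unconditionally, wrapping with j % n_as so no slot is
    // left uninitialized.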
    case GGML_OP_MUL_MAT_ID:
        {
            //GGML_ASSERT(ne00 == ne10);
            //GGML_ASSERT(ne03 == ne13);

            GGML_ASSERT(src0t == GGML_TYPE_I32);

            const int n_as = ((int32_t *) dst->op_params)[1];

            // TODO: make this more general
            GGML_ASSERT(n_as <= 8);

            // max size of the src1ids array in the kernel stack
            GGML_ASSERT(ne11 <= 512);

            const int64_t ne20 = src2 ? src2->ne[0] : 0;
            const int64_t ne21 = src2 ? src2->ne[1] : 0;
            const int64_t ne22 = src2 ? src2->ne[2] : 0;
            const int64_t ne23 = src2 ? src2->ne[3] : 0; GGML_UNUSED(ne23);

            const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20);
            const uint64_t nb21 = src2 ? src2->nb[1] : 0;
            const uint64_t nb22 = src2 ? src2->nb[2] : 0;
            const uint64_t nb23 = src2 ? src2->nb[3] : 0; GGML_UNUSED(nb23);

            const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);

            GGML_ASSERT(!ggml_is_transposed(src2));
            GGML_ASSERT(!ggml_is_transposed(src1));

            GGML_ASSERT(src1t == GGML_TYPE_F32);

            const uint r2 = ne12/ne22;
            const uint r3 = ne13/ne23;

            // find the break-even point where the matrix-matrix kernel becomes more efficient compared
            // to the matrix-vector kernel
            int ne11_mm_min = n_as;

            const int idx = ((int32_t *) dst->op_params)[0];

            // batch size
            GGML_ASSERT(ne01 == ne11);

            // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
            // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
            // !!!
            // TODO: for now, always use mat-vec kernels until we figure out how to improve the
            //       indirect matrix multiplication
            // !!!
            if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
                ne20 % 32 == 0 && ne20 >= 64 &&
                ne11 > ne11_mm_min) {
                id<MTLComputePipelineState> pipeline = nil;

                switch (src2->type) {
                    case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32    ].pipeline; break;
                    case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32    ].pipeline; break;
                    case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32   ].pipeline; break;
                    case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32   ].pipeline; break;
                    case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32   ].pipeline; break;
                    case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32   ].pipeline; break;
                    case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32].pipeline; break;
                    case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32 ].pipeline; break;
                    case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32].pipeline; break;
                    case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32  ].pipeline; break;
                    case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32 ].pipeline; break;
                    case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
                    default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
                }

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
                [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
                [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:5];
                [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:6];
                [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:7];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:8];
                [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:9];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:13];
                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:14];
                [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15];
                [encoder setBytes:&r2 length:sizeof(r2) atIndex:16];
                [encoder setBytes:&r3 length:sizeof(r3) atIndex:17];
                [encoder setBytes:&idx length:sizeof(idx) atIndex:18];
                // TODO: how to make this an array? read Metal docs
                for (int j = 0; j < 8; ++j) {
                    // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
                    struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];

                    size_t offs_src_cur = 0;
                    id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);

                    [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:19 + j];
                }

                [encoder setThreadgroupMemoryLength:8192 atIndex:0];

                [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne21 + 63)/64, n_as*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
            } else {
                int nth0 = 32;
                int nth1 = 1;
                int nrows = 1;
                //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                id<MTLComputePipelineState> pipeline = nil;

                // use custom matrix x vector kernel
                switch (src2t) {
                    case GGML_TYPE_F32:
                        {
                            GGML_ASSERT(src1t == GGML_TYPE_F32);
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32].pipeline;
                        } break;
                    case GGML_TYPE_F16:
                        {
                            GGML_ASSERT(src1t == GGML_TYPE_F32);
                            nth0 = 32;
                            nth1 = 1;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q4_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q4_1:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_1:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q8_0:
                        {
                            nth0 = 8;
                            nth1 = 8;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q2_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q3_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q4_K:
                        {
                            nth0 = 4; //1;
                            nth1 = 8; //32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q5_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_Q6_K:
                        {
                            nth0 = 2;
                            nth1 = 32;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_XXS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_XS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ3_XXS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ3_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ2_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ1_S:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ4_NL:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
                        } break;
                    case GGML_TYPE_IQ4_XS:
                        {
                            nth0 = 4;
                            nth1 = 16;
                            pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
                        } break;
                    default:
                        {
                            GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src2t);
                            GGML_ASSERT(false && "not implemented");
                        }
                };

                if (ggml_is_quantized(src2t)) {
                    GGML_ASSERT(ne20 >= nth0*nth1);
                }

                const int64_t _ne1 = 1; // kernel needs a reference in constant memory

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
                [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
                [encoder setBytes:&ne21 length:sizeof(ne21) atIndex:5];
                [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:6];
                [encoder setBytes:&nb20 length:sizeof(nb20) atIndex:7];
                [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:8];
                [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:9];
                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
                [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:11];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
                [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:17];
                [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:18];
                [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:19];
                [encoder setBytes:&r2 length:sizeof(r2) atIndex:20];
                [encoder setBytes:&r3 length:sizeof(r3) atIndex:21];
                [encoder setBytes:&idx length:sizeof(idx) atIndex:22];
                // TODO: how to make this an array? read Metal docs
                for (int j = 0; j < 8; ++j) {
                    // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
                    struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];

                    size_t offs_src_cur = 0;
                    id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);

                    [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:23 + j];
                }

                if (src2t == GGML_TYPE_Q4_0 || src2t == GGML_TYPE_Q4_1 ||
                    src2t == GGML_TYPE_Q5_0 || src2t == GGML_TYPE_Q5_1 || src2t == GGML_TYPE_Q8_0 ||
                    src2t == GGML_TYPE_Q2_K || src2t == GGML_TYPE_IQ1_S || src2t == GGML_TYPE_IQ2_S) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_IQ2_XXS || src2t == GGML_TYPE_IQ2_XS) {
                    const int mem_size = src2t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_IQ3_XXS || src2t == GGML_TYPE_IQ3_S) {
                    const int mem_size = src2t == GGML_TYPE_IQ3_XXS ? 256*4+128 : 512*4;
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_IQ4_NL || src2t == GGML_TYPE_IQ4_XS) {
                    const int mem_size = 32*sizeof(float);
                    [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_Q4_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                }
                else if (src2t == GGML_TYPE_Q5_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
                else if (src2t == GGML_TYPE_Q6_K) {
                    [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                } else {
                    const int64_t ny = (_ne1 + nrows - 1)/nrows;
                    [encoder dispatchThreadgroups:MTLSizeMake(ne21, ny, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                }
            }
        } break;
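    // GET_ROWS gathers rows of src0 selected by the I32 indices in src1,
    // dequantizing on the fly for quantized types. Roughly (a sketch, not the
    // kernel code):
    //
    //   dst[r] = dequantize(src0[src1[r]]);   // one threadgroup per index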
    case GGML_OP_GET_ROWS:
        {
            id<MTLComputePipelineState> pipeline = nil;

            switch (src0->type) {
                case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F32    ].pipeline; break;
                case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F16    ].pipeline; break;
                case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0   ].pipeline; break;
                case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1   ].pipeline; break;
                case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0   ].pipeline; break;
                case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1   ].pipeline; break;
                case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0   ].pipeline; break;
                case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K   ].pipeline; break;
                case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K   ].pipeline; break;
                case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K   ].pipeline; break;
                case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K   ].pipeline; break;
                case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K   ].pipeline; break;
                case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS].pipeline; break;
                case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS ].pipeline; break;
                case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS].pipeline; break;
                case GGML_TYPE_IQ3_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_S  ].pipeline; break;
                case GGML_TYPE_IQ2_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_S  ].pipeline; break;
                case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S  ].pipeline; break;
                case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
                case GGML_TYPE_IQ4_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
                case GGML_TYPE_I32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_I32    ].pipeline; break;
                default: GGML_ASSERT(false && "not implemented");
            }

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4];
            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:5];
            [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:6];
            [encoder setBytes:&nb10 length:sizeof( int64_t) atIndex:7];
            [encoder setBytes:&nb11 length:sizeof( int64_t) atIndex:8];
            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:9];
            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:10];

            [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
        } break;
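    // RMS_NORM normalizes each row by its root-mean-square:
    //
    //   y = x / sqrt(mean(x^2) + eps)
    //
    // One threadgroup per row; nth doubles until it covers ne00/4 (the kernel
    // reads float4), and the 32 floats of threadgroup memory hold the
    // per-simdgroup partial sums for the reduction.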
    case GGML_OP_RMS_NORM:
        {
            GGML_ASSERT(ne00 % 4 == 0);

            float eps;
            memcpy(&eps, dst->op_params, sizeof(float));

            int nth = 32; // SIMD width

            while (nth < ne00/4 && nth < 1024) {
                nth *= 2;
            }

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
            [encoder setBytes:&eps length:sizeof( float) atIndex:4];
            [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

            const int64_t nrows = ggml_nrows(src0);

            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
    case GGML_OP_GROUP_NORM:
        {
            GGML_ASSERT(ne00 % 4 == 0);

            //float eps;
            //memcpy(&eps, dst->op_params, sizeof(float));

            const float eps = 1e-6f; // TODO: temporarily hardcoded

            const int32_t n_groups = ((int32_t *) dst->op_params)[0];

            int nth = 32; // SIMD width

            //while (nth < ne00/4 && nth < 1024) {
            //    nth *= 2;
            //}

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:5];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:6];
            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:7];
            [encoder setBytes:&n_groups length:sizeof( int32_t) atIndex:8];
            [encoder setBytes:&eps length:sizeof( float) atIndex:9];
            [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

            [encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
    case GGML_OP_NORM:
        {
            float eps;
            memcpy(&eps, dst->op_params, sizeof(float));

            const int nth = MIN(256, ne00);

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NORM].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
            [encoder setBytes:&eps length:sizeof( float) atIndex:4];
            [encoder setThreadgroupMemoryLength:GGML_PAD(nth*sizeof(float), 16) atIndex:0];

            const int64_t nrows = ggml_nrows(src0);

            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
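    // ALIBI adds the linear positional bias of "Train Short, Test Long"
    // (Press et al.): each head gets a slope from the geometric m0 schedule,
    // falling back to the m1 schedule for heads beyond the nearest power of
    // two, mirroring the slope computation in SOFT_MAX above.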
    case GGML_OP_ALIBI:
        {
            GGML_ASSERT((src0t == GGML_TYPE_F32));

            const int nth = MIN(1024, ne00);

            //const int n_past = ((int32_t *) dst->op_params)[0];
            const int n_head = ((int32_t *) dst->op_params)[1];

            float max_bias;
            memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

            const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

            const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
            const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);

            id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
            [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
            [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
            [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
            [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
            [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
            [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
            [encoder setBytes:&m0 length:sizeof( float) atIndex:18];
            [encoder setBytes:&m1 length:sizeof( float) atIndex:19];
            [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];

            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
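    // ROPE op_params layout (as consumed below): [0] n_past, [1] n_dims,
    // [2] mode, [3] n_ctx (GLM variant, not implemented here), [4] n_orig_ctx,
    // then six floats: freq_base, freq_scale, ext_factor, attn_factor,
    // beta_fast, beta_slow. src1 supplies the per-row positions (hence the
    // ne10 == ne02 assert).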
    case GGML_OP_ROPE:
        {
            GGML_ASSERT(ne10 == ne02);

            const int nth = MIN(1024, ne00);

            const int n_past     = ((int32_t *) dst->op_params)[0];
            const int n_dims     = ((int32_t *) dst->op_params)[1];
            const int mode       = ((int32_t *) dst->op_params)[2];
            // skip 3, n_ctx, used in GLM RoPE, unimplemented in metal
            const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

            float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
            memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
            memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
            memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
            memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
            memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
            memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

            id<MTLComputePipelineState> pipeline = nil;

            switch (src0->type) {
                case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F32].pipeline; break;
                case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F16].pipeline; break;
                default: GGML_ASSERT(false);
            };

            [encoder setComputePipelineState:pipeline];
            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
            [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4];
            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5];
            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6];
            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7];
            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
            [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:11];
            [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:12];
            [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:13];
            [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:14];
            [encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:15];
            [encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:16];
            [encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:17];
            [encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:18];
            [encoder setBytes:&n_past length:sizeof( int) atIndex:19];
            [encoder setBytes:&n_dims length:sizeof( int) atIndex:20];
            [encoder setBytes:&mode length:sizeof( int) atIndex:21];
            [encoder setBytes:&n_orig_ctx length:sizeof( int) atIndex:22];
            [encoder setBytes:&freq_base length:sizeof( float) atIndex:23];
            [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
            [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
            [encoder setBytes:&attn_factor length:sizeof( float) atIndex:26];
            [encoder setBytes:&beta_fast length:sizeof( float) atIndex:27];
            [encoder setBytes:&beta_slow length:sizeof( float) atIndex:28];

            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
        } break;
                case GGML_OP_IM2COL:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F16);
                        GGML_ASSERT(src1->type == GGML_TYPE_F32);
                        GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);

                        const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
                        const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
                        const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
                        const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
                        const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
                        const int32_t d1 = ((const int32_t *)(dst->op_params))[5];

                        const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;

                        const int32_t N  = src1->ne[is_2D ? 3 : 2];
                        const int32_t IC = src1->ne[is_2D ? 2 : 1];
                        const int32_t IH = is_2D ? src1->ne[1] : 1;
                        const int32_t IW =         src1->ne[0];

                        const int32_t KH = is_2D ? src0->ne[1] : 1;
                        const int32_t KW =         src0->ne[0];

                        const int32_t OH = is_2D ? dst->ne[2] : 1;
                        const int32_t OW =         dst->ne[1];

                        const int32_t CHW = IC * KH * KW;

                        const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4;
                        const int32_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4;
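                        // im2col unrolls the KW x KH x IC input patches into the CHW-wide columns of dst,
                        // with the usual conv arithmetic OH = (IH + 2*p1 - d1*(KH - 1) - 1)/s1 + 1;
                        // e.g. IW = IH = 32, KW = KH = 3, s = d = p = 1 -> OW = OH = 32, CHW = 9*IC.
                        // the dispatch below launches IC x OH x OW threadgroups of N x KH x KW threads,
                        // which implicitly assumes N*KH*KW fits in the pipeline's maxTotalThreadsPerThreadgroup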
                        id<MTLComputePipelineState> pipeline = nil;

                        switch (dst->type) {
                            case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline; break;
                            case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F16].pipeline; break;
                            default: GGML_ASSERT(false);
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ofs0 length:sizeof( int32_t) atIndex:2];
                        [encoder setBytes:&ofs1 length:sizeof( int32_t) atIndex:3];
                        [encoder setBytes:&IW   length:sizeof( int32_t) atIndex:4];
                        [encoder setBytes:&IH   length:sizeof( int32_t) atIndex:5];
                        [encoder setBytes:&CHW  length:sizeof( int32_t) atIndex:6];
                        [encoder setBytes:&s0   length:sizeof( int32_t) atIndex:7];
                        [encoder setBytes:&s1   length:sizeof( int32_t) atIndex:8];
                        [encoder setBytes:&p0   length:sizeof( int32_t) atIndex:9];
                        [encoder setBytes:&p1   length:sizeof( int32_t) atIndex:10];
                        [encoder setBytes:&d0   length:sizeof( int32_t) atIndex:11];
                        [encoder setBytes:&d1   length:sizeof( int32_t) atIndex:12];

                        [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)];
                    } break;
                case GGML_OP_UPSCALE:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        const int sf = dst->op_params[0];
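                        // op_params[0] holds the integer nearest-neighbor scale factor used by
                        // ggml_upscale (e.g. sf = 2 doubles the spatial extent of the tensor)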
                        const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:10];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:11];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:12];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:13];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:14];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:15];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:16];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:17];
                        [encoder setBytes:&sf   length:sizeof(sf)   atIndex:18];

                        const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                case GGML_OP_PAD:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:10];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:11];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:12];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:13];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:14];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:15];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:16];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:17];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                case GGML_OP_ARGSORT:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);
                        GGML_ASSERT( dst->type == GGML_TYPE_I32);

                        const int nrows = ggml_nrows(src0);

                        enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (order) {
                            case GGML_SORT_ORDER_ASC:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC].pipeline;  break;
                            case GGML_SORT_ORDER_DESC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC].pipeline; break;
                            default: GGML_ASSERT(false);
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
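                        // one threadgroup per row with one thread per element; this assumes that
                        // ne00 does not exceed the pipeline's maxTotalThreadsPerThreadgroup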
                        [encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00, 1, 1)];
                    } break;
                case GGML_OP_LEAKY_RELU:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        float slope;
                        memcpy(&slope, dst->op_params, sizeof(float));

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&slope length:sizeof(slope) atIndex:2];

                        const int64_t n = ggml_nelements(dst);
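                        // one single-thread threadgroup per element - launch-heavy but trivially
                        // correct, and acceptable since this op is cheap and memory-bound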
                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                case GGML_OP_DUP:
                case GGML_OP_CPY:
                case GGML_OP_CONT:
                    {
                        GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);

                        int nth = MIN(1024, ne00/ggml_blck_size(src0->type));
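                        // rows are processed in units of the type's block size (1 element for
                        // F32/F16, 32 for Q4_0/Q4_1/Q8_0), hence the divisibility asserts on
                        // ne00 here and on ne0 below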
                        id<MTLComputePipelineState> pipeline = nil;

                        switch (src0t) {
                            case GGML_TYPE_F32:
                                {
                                    GGML_ASSERT(ne0 % ggml_blck_size(dst->type) == 0);

                                    switch (dstt) {
                                        case GGML_TYPE_F16:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F16].pipeline;  break;
                                        case GGML_TYPE_F32:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;  break;
                                        case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0].pipeline; break;
                                        case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0].pipeline; break;
                                        case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1].pipeline; break;
                                      //case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0].pipeline; break;
                                      //case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    }
                                } break;
                            case GGML_TYPE_F16:
                                {
                                    switch (dstt) {
                                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F16].pipeline; break;
                                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F32].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    }
                                } break;
                            default: GGML_ASSERT(false && "not implemented");
                        }
                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                        [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                        [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                        [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                        [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                        [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                        [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                        [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                default:
                    {
                        GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                        GGML_ASSERT(false);
                    }
            }

            if (should_capture) {
                [encoder popDebugGroup];
            }
        }

        [encoder endEncoding];

        [command_buffer commit];
    });
    // Wait for completion and check status of each command buffer
    // needed to detect if the device ran out-of-memory for example (#1881)
    for (int i = 0; i < n_cb; ++i) {
        id<MTLCommandBuffer> command_buffer = command_buffers[i];
        [command_buffer waitUntilCompleted];

        MTLCommandBufferStatus status = [command_buffer status];
        if (status != MTLCommandBufferStatusCompleted) {
            GGML_METAL_LOG_ERROR("%s: command buffer %d failed with status %lu\n", __func__, i, status);
            return false;
        }
    }

    if (should_capture) {
        [[MTLCaptureManager sharedCaptureManager] stopCapture];
    }

    }

    return true;
}
////////////////////////////////////////////////////////////////////////////////

// backend interface

// default buffer

static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;
static id<MTLDevice> ggml_backend_metal_get_device(void) {
    if (g_backend_device == nil) {
        g_backend_device = MTLCreateSystemDefaultDevice();
    }

    g_backend_device_ref_count++;

    return g_backend_device;
}
static void ggml_backend_metal_free_device(void) {
    assert(g_backend_device_ref_count > 0);

    g_backend_device_ref_count--;

    if (g_backend_device_ref_count == 0) {
        [g_backend_device release];
        g_backend_device = nil;
    }
}
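// note: every ggml_backend_metal_get_device() call takes one reference on the shared device
// and must be balanced by a ggml_backend_metal_free_device() - e.g. each buffer holds one
// reference for its lifetime and releases it in free_buffer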
GGML_CALL static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
    return "Metal";

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    for (int i = 0; i < ctx->n_buffers; i++) {
        [ctx->buffers[i].metal release];
    }
    ggml_backend_metal_free_device();

    if (ctx->owned) {
        free(ctx->all_data);
    }

    free(ctx);
}

GGML_CALL static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    return ctx->all_data;
}

GGML_CALL static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    UNUSED(buffer);
}
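// set/get_tensor can be plain memcpy's because all buffers here are created with
// MTLResourceStorageModeShared: on Apple silicon the CPU and GPU address the same
// unified memory, so no staging or blit pass is needed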
GGML_CALL static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    UNUSED(buffer);
}

GGML_CALL static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    memset(ctx->all_data, value, ctx->all_size);
}
static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
    /* .get_name    = */ ggml_backend_metal_buffer_get_name,
    /* .free_buffer = */ ggml_backend_metal_buffer_free_buffer,
    /* .get_base    = */ ggml_backend_metal_buffer_get_base,
    /* .init_tensor = */ NULL,
    /* .set_tensor  = */ ggml_backend_metal_buffer_set_tensor,
    /* .get_tensor  = */ ggml_backend_metal_buffer_get_tensor,
    /* .cpy_tensor  = */ ggml_backend_metal_buffer_cpy_tensor,
    /* .clear       = */ ggml_backend_metal_buffer_clear,
    /* .reset       = */ NULL,
};
// default buffer type

GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "Metal";

    UNUSED(buft);
}
static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
                device.currentAllocatedSize / 1024.0 / 1024.0,
                device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
            GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
        } else {
            GGML_METAL_LOG_INFO("\n");
        }
    } else {
        GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0);
    }
#endif
    UNUSED(device);
}
GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    const size_t size_page = sysconf(_SC_PAGESIZE);

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }
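    // newBufferWithBytesNoCopy requires a page-aligned allocation, so round the size up to the
    // next page boundary; e.g. with 16 KiB pages, size = 100000 -> size_aligned = 114688 (7 pages)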
    id<MTLDevice> device = ggml_backend_metal_get_device();

    ctx->all_data = ggml_metal_host_malloc(size_aligned);
    ctx->all_size = size_aligned;
    ctx->owned = true;
    ctx->n_buffers = 1;

    ctx->buffers[0].data = ctx->all_data;
    ctx->buffers[0].size = size;
    ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
                                                      length:size_aligned
                                                     options:MTLResourceStorageModeShared
                                                 deallocator:nil];

    if (ctx->buffers[0].metal == nil) {
        GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
        free(ctx);
        ggml_backend_metal_free_device();
        return NULL;
    }

    GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size);
}
GGML_CALL static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 32;

    UNUSED(buft);
}

GGML_CALL static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    id<MTLDevice> device = ggml_backend_metal_get_device();
    size_t max_size = device.maxBufferLength;
    ggml_backend_metal_free_device();

    return max_size;

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return ggml_backend_is_metal(backend) || ggml_backend_is_cpu(backend);

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    UNUSED(buft);
}
GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_metal_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_metal_buffer_type_get_alignment,
            /* .get_max_size     = */ ggml_backend_metal_buffer_type_get_max_size,
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
            /* .supports_backend = */ ggml_backend_metal_buffer_type_supports_backend,
            /* .is_host          = */ ggml_backend_metal_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_buffer_type_metal;
}
// buffer from ptr

GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    ctx->all_data = data;
    ctx->all_size = size;
    ctx->owned = false;
    ctx->n_buffers = 0;

    const size_t size_page = sysconf(_SC_PAGESIZE);

    // page-align the data ptr
    {
        const uintptr_t offs = (uintptr_t) data % size_page;
        data  = (void *) ((char *) data - offs);
        size += offs;
    }
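    // e.g. with 0x4000 (16 KiB) pages, data = 0x100003210 -> offs = 0x3210, so the mapped
    // region starts at 0x100000000 and the size grows by the same 0x3210 bytes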
    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }
    id<MTLDevice> device = ggml_backend_metal_get_device();

    // the buffer fits into the max buffer size allowed by the device
    if (size_aligned <= device.maxBufferLength) {
        ctx->buffers[ctx->n_buffers].data = data;
        ctx->buffers[ctx->n_buffers].size = size;

        ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

        if (ctx->buffers[ctx->n_buffers].metal == nil) {
            GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
            return NULL;
        }

        GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);

        ++ctx->n_buffers;
    } else {
        // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
        // one of the views
        const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
        const size_t size_step = device.maxBufferLength - size_ovlp;
        const size_t size_view = device.maxBufferLength;
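        // e.g. maxBufferLength = 16 GiB and max_size = 512 MiB -> size_ovlp ~= 512 MiB (+2 pages)
        // and size_step ~= 15.5 GiB; consecutive views overlap by size_ovlp, so any tensor of at
        // most max_size bytes lies entirely inside at least one view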
        for (size_t i = 0; i < size; i += size_step) {
            const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

            ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
            ctx->buffers[ctx->n_buffers].size = size_step_aligned;

            ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
                return NULL;
            }

            GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i);
            if (i + size_step < size) {
                GGML_METAL_LOG_INFO("\n");
            }

            ++ctx->n_buffers;
        }
    }

    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size);
}
// backend

GGML_CALL static const char * ggml_backend_metal_name(ggml_backend_t backend) {
    return "Metal";

    UNUSED(backend);
}

GGML_CALL static void ggml_backend_metal_free(ggml_backend_t backend) {
    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ggml_metal_free(ctx);
    free(backend);
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_metal_buffer_type();

    UNUSED(backend);
}

GGML_CALL static bool ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_graph_compute(metal_ctx, cgraph);
}

GGML_CALL static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_supports_op(metal_ctx, op);
}
static struct ggml_backend_i ggml_backend_metal_i = {
    /* .get_name                = */ ggml_backend_metal_name,
    /* .free                    = */ ggml_backend_metal_free,
    /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_async        = */ NULL,
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_metal_graph_compute,
    /* .supports_op             = */ ggml_backend_metal_supports_op,
};
void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
    ggml_metal_log_callback  = log_callback;
    ggml_metal_log_user_data = user_data;
}

static ggml_guid_t ggml_backend_metal_guid(void) {
    static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
    return &guid;
}
ggml_backend_t ggml_backend_metal_init(void) {
    struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS);

    if (ctx == NULL) {
        return NULL;
    }

    ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));

    *metal_backend = (struct ggml_backend) {
        /* .guid      = */ ggml_backend_metal_guid(),
        /* .interface = */ ggml_backend_metal_i,
        /* .context   = */ ctx,
    };

    return metal_backend;
}
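// typical usage (sketch):
//
//   ggml_backend_t backend = ggml_backend_metal_init();
//   if (backend == NULL) {
//       // Metal is unavailable - fall back to the CPU backend
//   }
//   ...
//   ggml_backend_free(backend);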
bool ggml_backend_is_metal(ggml_backend_t backend) {
    return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
}

void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
}
bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    return [ctx->device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
}
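// family is the 1-based Apple GPU family index, e.g.
// ggml_backend_metal_supports_family(backend, 7) checks MTLGPUFamilyApple7 (A14/M1 class)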
void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ctx->should_capture_next_compute = true;
}

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data); // silence warning

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data) {
    return ggml_backend_metal_init();

    GGML_UNUSED(params);
    GGML_UNUSED(user_data);
}