ggml-metal.m

#import "ggml-metal.h"
#import "ggml-backend-impl.h"
#import "ggml.h"

#import <Foundation/Foundation.h>

#import <Metal/Metal.h>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

#ifdef GGML_METAL_NDEBUG
#define GGML_METAL_LOG_INFO(...)
#define GGML_METAL_LOG_WARN(...)
#define GGML_METAL_LOG_ERROR(...)
#else
#define GGML_METAL_LOG_INFO(...)  ggml_metal_log(GGML_LOG_LEVEL_INFO, __VA_ARGS__)
#define GGML_METAL_LOG_WARN(...)  ggml_metal_log(GGML_LOG_LEVEL_WARN, __VA_ARGS__)
#define GGML_METAL_LOG_ERROR(...) ggml_metal_log(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

struct ggml_metal_kernel {
    id<MTLComputePipelineState> pipeline;
};
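
// one enum value per compiled Metal compute kernel; the names encode the op and the
// operand types, e.g. MUL_MV_Q4_0_F32 is a matrix-vector multiply with Q4_0 weights
// and F32 activations, the MUL_MM_* entries are the simdgroup matrix-matrix variants,
// and the *_ID_* entries are the indirect (expert-selecting) versions used by
// GGML_OP_MUL_MAT_ID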
enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_ADD,
    GGML_METAL_KERNEL_TYPE_ADD_ROW,
    GGML_METAL_KERNEL_TYPE_MUL,
    GGML_METAL_KERNEL_TYPE_MUL_ROW,
    GGML_METAL_KERNEL_TYPE_DIV,
    GGML_METAL_KERNEL_TYPE_DIV_ROW,
    GGML_METAL_KERNEL_TYPE_SCALE,
    GGML_METAL_KERNEL_TYPE_SCALE_4,
    GGML_METAL_KERNEL_TYPE_TANH,
    GGML_METAL_KERNEL_TYPE_RELU,
    GGML_METAL_KERNEL_TYPE_GELU,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
    GGML_METAL_KERNEL_TYPE_SILU,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX,
    GGML_METAL_KERNEL_TYPE_SOFT_MAX_4,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF,
    GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F32,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_F16,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
    GGML_METAL_KERNEL_TYPE_GET_ROWS_I32,
    GGML_METAL_KERNEL_TYPE_RMS_NORM,
    GGML_METAL_KERNEL_TYPE_GROUP_NORM,
    GGML_METAL_KERNEL_TYPE_NORM,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW,
    GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW,
  //GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32,
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F16,
    GGML_METAL_KERNEL_TYPE_ALIBI_F32,
    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
    GGML_METAL_KERNEL_TYPE_PAD_F32,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC,
    GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC,
    GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F32_F32,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0,
    GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1,
  //GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0,
  //GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F16,
    GGML_METAL_KERNEL_TYPE_CPY_F16_F32,
    GGML_METAL_KERNEL_TYPE_CONCAT,
    GGML_METAL_KERNEL_TYPE_SQR,
    GGML_METAL_KERNEL_TYPE_SUM_ROWS,

    GGML_METAL_KERNEL_TYPE_COUNT
};
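
// per-context Metal state: the device and command queue handles, the table of
// compiled kernel pipelines, and the feature flags detected at init time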
struct ggml_metal_context {
    int n_cb;

    id<MTLDevice>       device;
    id<MTLCommandQueue> queue;

    dispatch_queue_t d_queue;

    struct ggml_metal_kernel kernels[GGML_METAL_KERNEL_TYPE_COUNT];

    bool support_simdgroup_reduction;
    bool support_simdgroup_mm;

    bool should_capture_next_compute;
};

// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
// static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end

static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
    fprintf(stderr, "%s", msg);

    UNUSED(level);
    UNUSED(user_data);
}

ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback;
void * ggml_metal_log_user_data = NULL;
GGML_ATTRIBUTE_FORMAT(2, 3)
static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
    if (ggml_metal_log_callback != NULL) {
        va_list args;
        va_start(args, format);
        char buffer[128];
        int len = vsnprintf(buffer, 128, format, args);
        if (len < 128) {
            // the message fits in the stack buffer
            ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
        } else {
            // too long - retry with a heap buffer sized from vsnprintf's return value
            char * buffer2 = malloc(len + 1);
            va_end(args);
            va_start(args, format);
            vsnprintf(buffer2, len + 1, format, args);
            buffer2[len] = 0;
            ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
            free(buffer2);
        }
        va_end(args);
    }
}
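
// page-aligned host allocation; presumably so the memory can later be wrapped in a
// no-copy MTLBuffer (Metal's newBufferWithBytesNoCopy requires page-aligned storage)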
static void * ggml_metal_host_malloc(size_t n) {
    void * data = NULL;
    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
    if (result != 0) {
        GGML_METAL_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
        return NULL;
    }

    return data;
}
static struct ggml_metal_context * ggml_metal_init(int n_cb) {
    GGML_METAL_LOG_INFO("%s: allocating\n", __func__);

#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (id<MTLDevice> device in devices) {
        GGML_METAL_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
    }
    [devices release]; // since it was created by a *Copy* C method
#endif

    // Pick and show default Metal device
    id<MTLDevice> device = MTLCreateSystemDefaultDevice();
    GGML_METAL_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);

    // Configure context
    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
    ctx->device = device;
    ctx->n_cb   = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
    ctx->queue  = [ctx->device newCommandQueue];
    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);

    id<MTLLibrary> metal_library;

    // load library
    {
        NSBundle * bundle = nil;
#ifdef SWIFT_PACKAGE
        bundle = SWIFTPM_MODULE_BUNDLE;
#else
        bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
#endif

        NSError * error = nil;
        NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"];
        if (libPath != nil) {
            // pre-compiled library found
            NSURL * libURL = [NSURL fileURLWithPath:libPath];
            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]);

            metal_library = [ctx->device newLibraryWithURL:libURL error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
        } else {
#if GGML_METAL_EMBED_LIBRARY
            GGML_METAL_LOG_INFO("%s: using embedded metal library\n", __func__);

            extern const char ggml_metallib_start[];
            extern const char ggml_metallib_end[];

            NSString * src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
#else
            GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);

            NSString * sourcePath;
            NSString * ggmlMetalPathResources = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];

            GGML_METAL_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, ggmlMetalPathResources ? [ggmlMetalPathResources UTF8String] : "nil");

            if (ggmlMetalPathResources) {
                sourcePath = [ggmlMetalPathResources stringByAppendingPathComponent:@"ggml-metal.metal"];
            } else {
                sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
            }
            if (sourcePath == nil) {
                GGML_METAL_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
                sourcePath = @"ggml-metal.metal";
            }

            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]);

            NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error];
            if (error) {
                GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                return NULL;
            }
#endif

            @autoreleasepool {
                // dictionary of preprocessor macros
                NSMutableDictionary * prep = [NSMutableDictionary dictionary];

#ifdef GGML_QKK_64
                prep[@"QK_K"] = @(64);
#endif

                MTLCompileOptions * options = [MTLCompileOptions new];
                options.preprocessorMacros = prep;

                //[options setFastMathEnabled:false];

                metal_library = [ctx->device newLibraryWithSource:src options:options error:&error];
                if (error) {
                    GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
                    return NULL;
                }
            }
        }
    }

    // print MTL GPU family:
    GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);

    const NSInteger MTLGPUFamilyMetal3 = 5001;

    // determine max supported GPU family
    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
    {
        for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
                break;
            }
        }

        for (int i = MTLGPUFamilyMetal3 + 5; i >= MTLGPUFamilyMetal3; --i) {
            if ([ctx->device supportsFamily:i]) {
                GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3 + 3, i);
                break;
            }
        }
    }

    ctx->support_simdgroup_reduction  = [ctx->device supportsFamily:MTLGPUFamilyApple7];
    ctx->support_simdgroup_reduction |= [ctx->device supportsFamily:MTLGPUFamilyMetal3];

    ctx->support_simdgroup_mm = [ctx->device supportsFamily:MTLGPUFamilyApple7];

    GGML_METAL_LOG_INFO("%s: simdgroup reduction support = %s\n", __func__, ctx->support_simdgroup_reduction ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: simdgroup matrix mul. support = %s\n", __func__, ctx->support_simdgroup_mm ? "true" : "false");
    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");

    ctx->should_capture_next_compute = false;

#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1e6);
    }
#elif TARGET_OS_OSX
    if (ctx->device.maxTransferRate != 0) {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1e6);
    } else {
        GGML_METAL_LOG_INFO("%s: maxTransferRate = built-in GPU\n", __func__);
    }
#endif

    // load kernels
    {
        NSError * error = nil;

        for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
            ctx->kernels[i].pipeline = nil;
        }

        /*
        GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) kernel->pipeline, \
                (int) kernel->pipeline.maxTotalThreadsPerThreadgroup, \
                (int) kernel->pipeline.threadExecutionWidth); \
        */
#define GGML_METAL_ADD_KERNEL(e, name, supported) \
        if (supported) { \
            struct ggml_metal_kernel * kernel = &ctx->kernels[e]; \
            id<MTLFunction> metal_function = [metal_library newFunctionWithName:@"kernel_"#name]; \
            kernel->pipeline = [ctx->device newComputePipelineStateWithFunction:metal_function error:&error]; \
            [metal_function release]; \
            if (error) { \
                GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
                [metal_library release]; \
                return NULL; \
            } \
        } else { \
            GGML_METAL_LOG_WARN("%s: skipping %-32s (not supported)\n", __func__, "kernel_"#name); \
        }
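
        // example: GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true) compiles
        // the Metal function "kernel_add" into ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;
        // kernels whose 'supported' flag is false are skipped and their pipeline stays nil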
        // simd_sum and simd_max require MTLGPUFamilyApple7
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD, add, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ADD_ROW, add_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL, mul, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_ROW, mul_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV, div, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIV_ROW, div_row, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE, scale, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SCALE_4, scale_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SILU, silu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX, soft_max, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SOFT_MAX_4, soft_max_4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF, diag_mask_inf, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8, diag_mask_inf_8, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F32, get_rows_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_F16, get_rows_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0, get_rows_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1, get_rows_q4_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0, get_rows_q5_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1, get_rows_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0, get_rows_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K, get_rows_q2_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K, get_rows_q3_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K, get_rows_q4_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K, get_rows_q5_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K, get_rows_q6_K, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS, get_rows_iq2_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS, get_rows_iq2_xs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS, get_rows_iq3_xxs, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S, get_rows_iq1_s, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_I32, get_rows_i32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RMS_NORM, rms_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GROUP_NORM, group_norm, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_NORM, norm, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32, mul_mv_f32_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16, mul_mv_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32, mul_mv_f16_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW, mul_mv_f16_f32_1row, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4, mul_mv_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32, mul_mv_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32, mul_mv_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32, mul_mv_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32, mul_mv_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32, mul_mv_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32, mul_mv_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32, mul_mv_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32, mul_mv_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32, mul_mv_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32, mul_mv_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32, mul_mv_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32, mul_mv_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32, mul_mv_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32, mul_mv_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32, mul_mv_id_f32_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F16, mul_mv_id_f16_f16, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32, mul_mv_id_f16_f32, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_1ROW, mul_mv_id_f16_f32_1row, ctx->support_simdgroup_reduction);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32_L4, mul_mv_id_f16_f32_l4, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32, mul_mv_id_q4_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32, mul_mv_id_q4_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32, mul_mv_id_q5_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32, mul_mv_id_q5_1_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32, mul_mv_id_q8_0_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32, mul_mv_id_q2_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32, mul_mv_id_q3_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32, mul_mv_id_q4_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32, mul_mv_id_q5_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32, mul_mv_id_q6_K_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32, mul_mv_id_iq2_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32, mul_mv_id_iq2_xs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32, mul_mv_id_iq3_xxs_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32, mul_mv_id_iq1_s_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32, mul_mm_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32, mul_mm_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32, mul_mm_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32, mul_mm_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32, mul_mm_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32, mul_mm_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32, mul_mm_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32, mul_mm_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32, mul_mm_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32, mul_mm_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32, mul_mm_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32, mul_mm_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32, mul_mm_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32, mul_mm_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32, mul_mm_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32, mul_mm_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32, mul_mm_id_f32_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32, mul_mm_id_f16_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32, mul_mm_id_q4_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32, mul_mm_id_q4_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32, mul_mm_id_q5_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32, mul_mm_id_q5_1_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32, mul_mm_id_q8_0_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32, mul_mm_id_q2_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32, mul_mm_id_q3_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32, mul_mm_id_q4_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32, mul_mm_id_q5_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32, mul_mm_id_q6_K_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32, mul_mm_id_iq2_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32, mul_mm_id_iq2_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32, mul_mm_id_iq3_xxs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32, mul_mm_id_iq1_s_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_PAD_F32, pad_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0, cpy_f32_q4_0, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1, cpy_f32_q4_1, true);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0, cpy_f32_q5_0, true);
      //GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1, cpy_f32_q5_1, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F16, cpy_f16_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F16_F32, cpy_f16_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CONCAT, concat, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SQR, sqr, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SUM_ROWS, sum_rows, true);
    }

    [metal_library release];
    return ctx;
}
static void ggml_metal_free(struct ggml_metal_context * ctx) {
    GGML_METAL_LOG_INFO("%s: deallocating\n", __func__);

    for (int i = 0; i < GGML_METAL_KERNEL_TYPE_COUNT; ++i) {
        [ctx->kernels[i].pipeline release];
    }

    [ctx->queue release];
    [ctx->device release];

    dispatch_release(ctx->d_queue);

    free(ctx);
}

// temporarily defined here for compatibility between ggml-backend and the old API

struct ggml_backend_metal_buffer {
    void   * data;
    size_t   size;

    id<MTLBuffer> metal;
};
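
// a single backend buffer may be backed by more than one MTLBuffer: Metal caps an
// individual buffer at device.maxBufferLength, so a large mmap-ed model is split
// into several views over the same contiguous host allocation (see n_buffers below)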
struct ggml_backend_metal_buffer_context {
    void * all_data;
    size_t all_size;
    bool   owned;

    // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
    int n_buffers;
    struct ggml_backend_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];
};
// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_tensor * t, size_t * offs) {
    //GGML_METAL_LOG_INFO("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;

    struct ggml_backend_metal_buffer_context * buf_ctx = (struct ggml_backend_metal_buffer_context *) buffer->context;

    // find the view that contains the tensor fully
    for (int i = 0; i < buf_ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) buf_ctx->buffers[i].data;

        //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf_ctx->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf_ctx->buffers[i].size);
        if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf_ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //GGML_METAL_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);

            return buf_ctx->buffers[i].metal;
        }
    }

    GGML_METAL_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);

    return nil;
}
static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const struct ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_RELU:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
                    return true;
                default:
                    return false;
            }
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
        case GGML_OP_CONCAT:
        case GGML_OP_ADD:
        case GGML_OP_ACC:
        case GGML_OP_MUL:
        case GGML_OP_DIV:
        case GGML_OP_SCALE:
        case GGML_OP_SQR:
        case GGML_OP_SUM_ROWS:
            return true;
        case GGML_OP_SOFT_MAX:
        case GGML_OP_RMS_NORM:
        case GGML_OP_GROUP_NORM:
            return ctx->support_simdgroup_reduction;
        case GGML_OP_NORM:
        case GGML_OP_ALIBI:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
            return true;
        case GGML_OP_POOL_1D:
        case GGML_OP_POOL_2D:
            return false;
        case GGML_OP_UPSCALE:
        case GGML_OP_PAD:
        case GGML_OP_ARGSORT:
        case GGML_OP_LEAKY_RELU:
            return true;
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            return ctx->support_simdgroup_reduction &&
                (op->src[0]->type != GGML_TYPE_F32 || op->src[1]->type == GGML_TYPE_F32);
        case GGML_OP_CPY:
        case GGML_OP_DUP:
        case GGML_OP_CONT:
            {
                switch (op->src[0]->type) {
                    case GGML_TYPE_F32:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                            case GGML_TYPE_Q8_0:
                            case GGML_TYPE_Q4_0:
                            case GGML_TYPE_Q4_1:
                                return true;
                            default:
                                return false;
                        }
                    case GGML_TYPE_F16:
                        switch (op->type) {
                            case GGML_TYPE_F16:
                            case GGML_TYPE_F32:
                                return true;
                            default:
                                return false;
                        }
                    default:
                        return false;
                };
            }
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_GET_ROWS:
            {
                return op->ne[3] == 1;
            }
        default:
            return false;
    }
}
static bool ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
               struct ggml_cgraph * gf) {

    @autoreleasepool {
    MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;
    edesc.dispatchType = MTLDispatchTypeSerial;

    // create multiple command buffers and enqueue them
    // then, we encode the graph into the command buffers in parallel

    const int n_nodes = gf->n_nodes;
    const int n_cb = ctx->n_cb;
    const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;

    const bool should_capture = ctx->should_capture_next_compute;

    if (should_capture) {
        ctx->should_capture_next_compute = false;

        MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
        descriptor.captureObject = ctx->queue;

        NSError * error = nil;
        if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
            GGML_METAL_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
            GGML_ASSERT(!"capture failed");
        }
    }

    id<MTLCommandBuffer> command_buffer_builder[n_cb];
    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        id<MTLCommandBuffer> command_buffer = [ctx->queue commandBufferWithUnretainedReferences];
        command_buffer_builder[cb_idx] = command_buffer;

        // enqueue the command buffers in order to specify their execution order
        [command_buffer enqueue];
    }

    const id<MTLCommandBuffer> * command_buffers = command_buffer_builder;

    dispatch_apply(n_cb, ctx->d_queue, ^(size_t iter) {
        const int cb_idx = iter;

        size_t offs_src0 = 0;
        size_t offs_src1 = 0;
        size_t offs_src2 = 0;
        size_t offs_dst  = 0;

        id<MTLCommandBuffer> command_buffer  = command_buffers[cb_idx];
        id<MTLComputeCommandEncoder> encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];

        const int node_start =                                      (cb_idx + 0) * n_nodes_per_cb;
        const int node_end   = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);

        for (int i = node_start; i < node_end; ++i) {
            if (i == -1) {
                [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                continue;
            }

            //GGML_METAL_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));

            struct ggml_tensor * src0 = gf->nodes[i]->src[0];
            struct ggml_tensor * src1 = gf->nodes[i]->src[1];
            struct ggml_tensor * src2 = gf->nodes[i]->src[2];
            struct ggml_tensor * dst  = gf->nodes[i];

            switch (dst->op) {
                case GGML_OP_NONE:
                case GGML_OP_RESHAPE:
                case GGML_OP_VIEW:
                case GGML_OP_TRANSPOSE:
                case GGML_OP_PERMUTE:
                    {
                        // noop -> next node
                    } continue;
                default:
                    {
                    } break;
            }

            if (!ggml_metal_supports_op(ctx, dst)) {
                GGML_METAL_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(dst));
                GGML_ASSERT(!"unsupported op");
            }

            if (should_capture) {
                [encoder pushDebugGroup:[NSString stringWithCString:ggml_op_desc(dst) encoding:NSUTF8StringEncoding]];
            }
            const int64_t  ne00 = src0 ? src0->ne[0] : 0;
            const int64_t  ne01 = src0 ? src0->ne[1] : 0;
            const int64_t  ne02 = src0 ? src0->ne[2] : 0;
            const int64_t  ne03 = src0 ? src0->ne[3] : 0;

            const uint64_t nb00 = src0 ? src0->nb[0] : 0;
            const uint64_t nb01 = src0 ? src0->nb[1] : 0;
            const uint64_t nb02 = src0 ? src0->nb[2] : 0;
            const uint64_t nb03 = src0 ? src0->nb[3] : 0;

            const int64_t  ne10 = src1 ? src1->ne[0] : 0;
            const int64_t  ne11 = src1 ? src1->ne[1] : 0;
            const int64_t  ne12 = src1 ? src1->ne[2] : 0;
            const int64_t  ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);

            const uint64_t nb10 = src1 ? src1->nb[0] : 0;
            const uint64_t nb11 = src1 ? src1->nb[1] : 0;
            const uint64_t nb12 = src1 ? src1->nb[2] : 0;
            const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);

            const int64_t  ne0 = dst ? dst->ne[0] : 0;
            const int64_t  ne1 = dst ? dst->ne[1] : 0;
            const int64_t  ne2 = dst ? dst->ne[2] : 0;
            const int64_t  ne3 = dst ? dst->ne[3] : 0;

            const uint64_t nb0 = dst ? dst->nb[0] : 0;
            const uint64_t nb1 = dst ? dst->nb[1] : 0;
            const uint64_t nb2 = dst ? dst->nb[2] : 0;
            const uint64_t nb3 = dst ? dst->nb[3] : 0;

            const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
            const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
            const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;

            id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(src0, &offs_src0) : nil;
            id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(src1, &offs_src1) : nil;
            id<MTLBuffer> id_src2 = src2 ? ggml_metal_get_buffer(src2, &offs_src2) : nil;
            id<MTLBuffer> id_dst  = dst  ? ggml_metal_get_buffer(dst,  &offs_dst)  : nil;

            //GGML_METAL_LOG_INFO("%s: op - %s\n", __func__, ggml_op_name(dst->op));
            //if (src0) {
            //    GGML_METAL_LOG_INFO("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
            //            ggml_is_contiguous(src0), src0->name);
            //}
            //if (src1) {
            //    GGML_METAL_LOG_INFO("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
            //            ggml_is_contiguous(src1), src1->name);
            //}
            //if (dst) {
            //    GGML_METAL_LOG_INFO("%s: dst  - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
            //            dst->name);
            //}
            switch (dst->op) {
                case GGML_OP_CONCAT:
                    {
                        const int64_t nb = ne00;

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CONCAT].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
                        [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
                        [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
                        [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
                        [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
                        [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
                        [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
                        [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
                        [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:19];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:20];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:21];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:22];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:23];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:24];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:25];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:26];
                        [encoder setBytes:&nb   length:sizeof(nb)   atIndex:27];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                case GGML_OP_ADD:
                case GGML_OP_MUL:
                case GGML_OP_DIV:
                    {
                        const size_t offs = 0;

                        bool bcast_row = false;

                        int64_t nb = ne00;

                        id<MTLComputePipelineState> pipeline = nil;

                        if (ggml_nelements(src1) == ne10 && ggml_is_contiguous(src1) && ne00 % 4 == 0 && ne10 % 4 == 0) {
                            GGML_ASSERT(ggml_is_contiguous(src0));

                            // src1 is a row
                            GGML_ASSERT(ne11 == 1);

                            nb = ne00 / 4;
                            switch (dst->op) {
                                case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD_ROW].pipeline; break;
                                case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_ROW].pipeline; break;
                                case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV_ROW].pipeline; break;
                                default: GGML_ASSERT(false);
                            }

                            bcast_row = true;
                        } else {
                            switch (dst->op) {
                                case GGML_OP_ADD: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline; break;
                                case GGML_OP_MUL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL].pipeline; break;
                                case GGML_OP_DIV: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIV].pipeline; break;
                                default: GGML_ASSERT(false);
                            }
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
                        [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
                        [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
                        [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
                        [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
                        [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
                        [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
                        [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
                        [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:19];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:20];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:21];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:22];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:23];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:24];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:25];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:26];
                        [encoder setBytes:&offs length:sizeof(offs) atIndex:27];
                        [encoder setBytes:&nb   length:sizeof(nb)   atIndex:28];

                        if (bcast_row) {
                            const int64_t n = ggml_nelements(dst)/4;

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } else {
                            const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        }
                    } break;
        case GGML_OP_ACC:
            {
                GGML_ASSERT(src0t == GGML_TYPE_F32);
                GGML_ASSERT(src1t == GGML_TYPE_F32);
                GGML_ASSERT(dstt  == GGML_TYPE_F32);

                GGML_ASSERT(ggml_is_contiguous(src0));
                GGML_ASSERT(ggml_is_contiguous(src1));

                const size_t pnb1 = ((int32_t *) dst->op_params)[0];
                const size_t pnb2 = ((int32_t *) dst->op_params)[1];
                const size_t pnb3 = ((int32_t *) dst->op_params)[2];
                const size_t offs = ((int32_t *) dst->op_params)[3];

                const bool inplace = (bool) ((int32_t *) dst->op_params)[4];

                if (!inplace) {
                    // run a separate kernel to copy src->dst
                    // not sure how to avoid this
                    // TODO: make a simpler cpy_bytes kernel
                    const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                    [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                    [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                    [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                    [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                    [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                    [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                    [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                    [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                    [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                    [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                    [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                    [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                    [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                    [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                    [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                    [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];

                    const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);

                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                }

                const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ADD].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
                [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:8];
                [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:9];
                [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:10];
                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
                [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
                [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:19];
                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:20];
                [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:21];
                [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:22];
                [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:23];
                [encoder setBytes:&pnb1 length:sizeof(pnb1) atIndex:24];
                [encoder setBytes:&pnb2 length:sizeof(pnb2) atIndex:25];
                [encoder setBytes:&pnb3 length:sizeof(pnb3) atIndex:26];
                [encoder setBytes:&offs length:sizeof(offs) atIndex:27];

                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne00);

                [encoder dispatchThreadgroups:MTLSizeMake(ne11, ne12, ne13) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_SCALE:
            {
                GGML_ASSERT(ggml_is_contiguous(src0));

                const float scale = *(const float *) dst->op_params;

                int64_t n = ggml_nelements(dst);

                id<MTLComputePipelineState> pipeline = nil;
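                // use the float4 variant when the total element count is divisible by 4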
                if (n % 4 == 0) {
                    n /= 4;
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE_4].pipeline;
                } else {
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SCALE].pipeline;
                }

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&scale length:sizeof(scale) atIndex:2];

                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
            } break;
        case GGML_OP_UNARY:
            switch (ggml_get_unary_op(gf->nodes[i])) {
                case GGML_UNARY_OP_TANH:
                    {
                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_TANH].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                        const int64_t n = ggml_nelements(dst);

                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                case GGML_UNARY_OP_RELU:
                    {
                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RELU].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                        const int64_t n = ggml_nelements(dst);

                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                case GGML_UNARY_OP_GELU:
                    {
                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                        const int64_t n = ggml_nelements(dst);
                        GGML_ASSERT(n % 4 == 0);

                        [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                case GGML_UNARY_OP_GELU_QUICK:
                    {
                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GELU_QUICK].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                        const int64_t n = ggml_nelements(dst);
                        GGML_ASSERT(n % 4 == 0);

                        [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                case GGML_UNARY_OP_SILU:
                    {
                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SILU].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                        const int64_t n = ggml_nelements(dst);
                        GGML_ASSERT(n % 4 == 0);

                        [encoder dispatchThreadgroups:MTLSizeMake(n/4, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
                default:
                    {
                        GGML_METAL_LOG_WARN("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                        GGML_ASSERT(false);
                    }
            } break;
        case GGML_OP_SQR:
            {
                GGML_ASSERT(ggml_is_contiguous(src0));

                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SQR].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                const int64_t n = ggml_nelements(dst);

                [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
            } break;
        case GGML_OP_SUM_ROWS:
            {
                GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));

                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SUM_ROWS].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:11];
                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
                [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
                [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:17];
                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:18];
                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:19];
                [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:20];
                [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:21];
                [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:22];
                [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:23];
                [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:24];
                [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:25];

                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
            } break;
        case GGML_OP_SOFT_MAX:
            {
                int nth = 32; // SIMD width

                id<MTLComputePipelineState> pipeline = nil;

                if (ne00%4 == 0) {
                    while (nth < ne00/4 && nth < 256) {
                        nth *= 2;
                    }
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_4].pipeline;
                } else {
                    while (nth < ne00 && nth < 1024) {
                        nth *= 2;
                    }
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX].pipeline;
                }

                const float scale    = ((float *) dst->op_params)[0];
                const float max_bias = ((float *) dst->op_params)[1];

                const int64_t nrows_x = ggml_nrows(src0);
                const int64_t nrows_y = src0->ne[1];

                const uint32_t n_head_kv   = nrows_x/nrows_y;
                const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));

                const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
                const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
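                // m0/m1 are the ALiBi slope bases: heads below the power-of-two boundary
                // n_head_log2 use powers of m0, the remaining heads use odd powers of m1
                // (this is assumed to follow the standard ALiBi slope schedule; the
                // per-head expansion happens inside the kernel)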
                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                if (id_src1) {
                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                } else {
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
                }
                if (id_src2) {
                    [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
                } else {
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
                }
                [encoder setBuffer:id_dst offset:offs_dst atIndex:3];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
                [encoder setBytes:&scale    length:sizeof(scale)    atIndex:7];
                [encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
                [encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
                [encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
                [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

                [encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_DIAG_MASK_INF:
            {
                const int n_past = ((int32_t *)(dst->op_params))[0];

                id<MTLComputePipelineState> pipeline = nil;

                if (ne00%8 == 0) {
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF_8].pipeline;
                } else {
                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_DIAG_MASK_INF].pipeline;
                }
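                // the _8 variant masks 8 values per thread, which is why the dispatch
                // below collapses the tensor into ne00*ne01*ne02/8 flat threadgroups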
                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                [encoder setBytes:&n_past length:sizeof(int) atIndex:4];

                if (ne00%8 == 0) {
                    [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                } else {
                    [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                }
            } break;
        case GGML_OP_MUL_MAT:
            {
                GGML_ASSERT(ne00 == ne10);

                // TODO: assert that dim2 and dim3 are contiguous
                GGML_ASSERT(ne12 % ne02 == 0);
                GGML_ASSERT(ne13 % ne03 == 0);

                const uint r2 = ne12/ne02;
                const uint r3 = ne13/ne03;
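                // r2/r3 above are the broadcast factors: how many src1 batches map onto
                // each src0 batch along dims 2 and 3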
                // find the break-even point where the matrix-matrix kernel becomes more
                // efficient compared to the matrix-vector kernel
                int ne11_mm_min = 1;

#if 0
                // the numbers below were measured on an M2 Ultra for 7B and 13B models;
                // they do not translate to other devices or model sizes
                // TODO: need to find a better approach
                if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
                    switch (src0t) {
                        case GGML_TYPE_F16:  ne11_mm_min = 2;  break;
                        case GGML_TYPE_Q8_0: ne11_mm_min = 7;  break;
                        case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
                        case GGML_TYPE_Q3_K: ne11_mm_min = 7;  break;
                        case GGML_TYPE_Q4_0:
                        case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
                        case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
                        case GGML_TYPE_Q5_0:                          // not tested yet
                        case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
                        case GGML_TYPE_Q5_K: ne11_mm_min = 7;  break;
                        case GGML_TYPE_Q6_K: ne11_mm_min = 7;  break;
                        default:             ne11_mm_min = 1;  break;
                    }
                }
#endif

                // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs;
                // AMD GPUs and older A-series chips reuse the matrix-vector multiplication kernel
                if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
                    !ggml_is_transposed(src0) &&
                    !ggml_is_transposed(src1) &&
                    src1t == GGML_TYPE_F32 &&
                    ne00 % 32 == 0 && ne00 >= 64 &&
                    (ne11 > ne11_mm_min || (ggml_is_quantized(src0t) && ne12 > 1))) {
                    //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                    id<MTLComputePipelineState> pipeline = nil;

                    switch (src0->type) {
                        case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F32_F32    ].pipeline; break;
                        case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_F16_F32    ].pipeline; break;
                        case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_1_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_1_F32   ].pipeline; break;
                        case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q8_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q2_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q3_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q4_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q5_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_Q6_K_F32   ].pipeline; break;
                        case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XXS_F32].pipeline; break;
                        case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_XS_F32 ].pipeline; break;
                        case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_XXS_F32].pipeline; break;
                        case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ1_S_F32  ].pipeline; break;
                        case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
                        default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
                    }

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                    [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                    [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                    [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
                    [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
                    [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
                    [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:8];
                    [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:9];
                    [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:10];
                    [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:11];
                    [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:12];
                    [encoder setBytes:&r2   length:sizeof(r2)   atIndex:13];
                    [encoder setBytes:&r3   length:sizeof(r3)   atIndex:14];
                    [encoder setThreadgroupMemoryLength:8192 atIndex:0];

                    [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne01 + 63)/64, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
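                    // tile geometry, inferred from the dispatch above: each 128-thread
                    // threadgroup produces a 32x64 block of dst (32 src1 rows by 64 src0
                    // rows), staged through the 8192 bytes of threadgroup memory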
                } else {
                    int nth0 = 32;
                    int nth1 = 1;
                    int nrows = 1;
                    //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                    id<MTLComputePipelineState> pipeline = nil;

                    // use custom matrix x vector kernel
                    switch (src0t) {
                        case GGML_TYPE_F32:
                            {
                                GGML_ASSERT(src1t == GGML_TYPE_F32);
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F32_F32].pipeline;
                                nrows = 4;
                            } break;
                        case GGML_TYPE_F16:
                            {
                                nth0 = 32;
                                nth1 = 1;
                                if (src1t == GGML_TYPE_F32) {
                                    if (ne11 * ne12 < 4) {
                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_1ROW].pipeline;
                                    } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32_L4].pipeline;
                                        nrows = ne11;
                                    } else {
                                        pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F32].pipeline;
                                        nrows = 4;
                                    }
                                } else {
                                    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_F16_F16].pipeline;
                                    nrows = 4;
                                }
                            } break;
                        case GGML_TYPE_Q4_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q4_1:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_1_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_1:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_1_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q8_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q8_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q2_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q2_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q3_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q3_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q4_K:
                            {
                                nth0 = 4; //1;
                                nth1 = 8; //32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q4_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q5_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q6_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_Q6_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ2_XXS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XXS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ2_XS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_XS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ3_XXS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_XXS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ1_S:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ1_S_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ4_NL:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32].pipeline;
                            } break;
                        default:
                            {
                                GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src0t);
                                GGML_ASSERT(false && "not implemented");
                            }
                    };

                    if (ggml_is_quantized(src0t)) {
                        GGML_ASSERT(ne00 >= nth0*nth1);
                    }
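                    // the assert above reflects that the quantized kernels read each row
                    // cooperatively with nth0*nth1 threads per threadgroup, so the row
                    // must provide at least one element per thread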
                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                    [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                    [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                    [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                    [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                    [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                    [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                    [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
                    [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
                    [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
                    [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
                    [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
                    [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
                    [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:15];
                    [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:16];
                    [encoder setBytes:&r2   length:sizeof(r2)   atIndex:17];
                    [encoder setBytes:&r3   length:sizeof(r3)   atIndex:18];

                    if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
                        src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 ||
                        src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_IQ1_S) { // || src0t == GGML_TYPE_Q4_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
                        const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_IQ3_XXS) {
                        const int mem_size = 256*4+128;
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_IQ4_NL) {
                        const int mem_size = 32*sizeof(float);
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_Q4_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                    }
                    else if (src0t == GGML_TYPE_Q5_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src0t == GGML_TYPE_Q6_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    } else {
                        const int64_t ny = (ne11 + nrows - 1)/nrows;
                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                }
            } break;
        case GGML_OP_MUL_MAT_ID:
            {
                //GGML_ASSERT(ne00 == ne10);
                //GGML_ASSERT(ne03 == ne13);

                GGML_ASSERT(src0t == GGML_TYPE_I32);

                const int n_as = ((int32_t *) dst->op_params)[1];

                // TODO: make this more general
                GGML_ASSERT(n_as <= 8);

                // max size of the src1ids array in the kernel stack
                GGML_ASSERT(ne11 <= 512);

                const int64_t  ne20 = src2 ? src2->ne[0] : 0;
                const int64_t  ne21 = src2 ? src2->ne[1] : 0;
                const int64_t  ne22 = src2 ? src2->ne[2] : 0;
                const int64_t  ne23 = src2 ? src2->ne[3] : 0; GGML_UNUSED(ne23);

                const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20);
                const uint64_t nb21 = src2 ? src2->nb[1] : 0;
                const uint64_t nb22 = src2 ? src2->nb[2] : 0;
                const uint64_t nb23 = src2 ? src2->nb[3] : 0; GGML_UNUSED(nb23);

                const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);

                GGML_ASSERT(!ggml_is_transposed(src2));
                GGML_ASSERT(!ggml_is_transposed(src1));

                GGML_ASSERT(src1t == GGML_TYPE_F32);

                const uint r2 = ne12/ne22;
                const uint r3 = ne13/ne23;

                // find the break-even point where the matrix-matrix kernel becomes more
                // efficient compared to the matrix-vector kernel
                int ne11_mm_min = n_as;

                const int idx = ((int32_t *) dst->op_params)[0];

                // batch size
                GGML_ASSERT(ne01 == ne11);
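                // with n_as experts the rows of src1 are scattered across the expert
                // matrices, so (as a heuristic) the mat-mat path is only expected to
                // pay off once the batch exceeds the expert count (ne11_mm_min = n_as)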
                // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs;
                // AMD GPUs and older A-series chips reuse the matrix-vector multiplication kernel
                // !!!
                // TODO: for now, always use mat-vec kernels until we figure out how to improve the
                //       indirect matrix multiplication
                // !!!
                if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
                    ne20 % 32 == 0 && ne20 >= 64 &&
                    ne11 > ne11_mm_min) {
                    id<MTLComputePipelineState> pipeline = nil;

                    switch (src2->type) {
                        case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F32_F32    ].pipeline; break;
                        case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_F16_F32    ].pipeline; break;
                        case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_1_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_1_F32   ].pipeline; break;
                        case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q8_0_F32   ].pipeline; break;
                        case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q2_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q3_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q4_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q5_K_F32   ].pipeline; break;
                        case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_Q6_K_F32   ].pipeline; break;
                        case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XXS_F32].pipeline; break;
                        case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_XS_F32 ].pipeline; break;
                        case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_XXS_F32].pipeline; break;
                        case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ1_S_F32  ].pipeline; break;
                        case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32 ].pipeline; break;
                        default: GGML_ASSERT(false && "MUL_MAT_ID not implemented");
                    }

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                    [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
                    [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
                    [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:5];
                    [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:6];
                    [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:7];
                    [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:8];
                    [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:9];
                    [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
                    [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
                    [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
                    [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:13];
                    [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:14];
                    [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:15];
                    [encoder setBytes:&r2   length:sizeof(r2)   atIndex:16];
                    [encoder setBytes:&r3   length:sizeof(r3)   atIndex:17];
                    [encoder setBytes:&idx  length:sizeof(idx)  atIndex:18];
                    // TODO: how to make this an array? read Metal docs
                    for (int j = 0; j < 8; ++j) {
                        // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
                        struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];

                        size_t offs_src_cur = 0;
                        id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);

                        [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:19 + j];
                    }

                    [encoder setThreadgroupMemoryLength:8192 atIndex:0];

                    [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne21 + 63)/64, n_as*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
                } else {
                    int nth0 = 32;
                    int nth1 = 1;
                    int nrows = 1;
                    //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);

                    id<MTLComputePipelineState> pipeline = nil;

                    // use custom matrix x vector kernel
                    switch (src2t) {
                        case GGML_TYPE_F32:
                            {
                                GGML_ASSERT(src1t == GGML_TYPE_F32);
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F32_F32].pipeline;
                            } break;
                        case GGML_TYPE_F16:
                            {
                                GGML_ASSERT(src1t == GGML_TYPE_F32);
                                nth0 = 32;
                                nth1 = 1;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_F16_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q4_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q4_1:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_1_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_1:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_1_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q8_0:
                            {
                                nth0 = 8;
                                nth1 = 8;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q8_0_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q2_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q2_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q3_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q3_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q4_K:
                            {
                                nth0 = 4; //1;
                                nth1 = 8; //32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q4_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q5_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q5_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_Q6_K:
                            {
                                nth0 = 2;
                                nth1 = 32;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_Q6_K_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ2_XXS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XXS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ2_XS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_XS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ3_XXS:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_XXS_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ1_S:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ1_S_F32].pipeline;
                            } break;
                        case GGML_TYPE_IQ4_NL:
                            {
                                nth0 = 4;
                                nth1 = 16;
                                pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32].pipeline;
                            } break;
                        default:
                            {
                                GGML_METAL_LOG_ERROR("Asserting on type %d\n", (int)src2t);
                                GGML_ASSERT(false && "not implemented");
                            }
                    };

                    if (ggml_is_quantized(src2t)) {
                        GGML_ASSERT(ne20 >= nth0*nth1);
                    }
                    const int64_t _ne1 = 1; // the kernel needs a reference in constant memory

                    [encoder setComputePipelineState:pipeline];
                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                    [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                    [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:3];
                    [encoder setBytes:&ne20 length:sizeof(ne20) atIndex:4];
                    [encoder setBytes:&ne21 length:sizeof(ne21) atIndex:5];
                    [encoder setBytes:&ne22 length:sizeof(ne22) atIndex:6];
                    [encoder setBytes:&nb20 length:sizeof(nb20) atIndex:7];
                    [encoder setBytes:&nb21 length:sizeof(nb21) atIndex:8];
                    [encoder setBytes:&nb22 length:sizeof(nb22) atIndex:9];
                    [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:10];
                    [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:11];
                    [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:12];
                    [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:13];
                    [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:14];
                    [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:15];
                    [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:16];
                    [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:17];
                    [encoder setBytes:&_ne1 length:sizeof(_ne1) atIndex:18];
                    [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:19];
                    [encoder setBytes:&r2   length:sizeof(r2)   atIndex:20];
                    [encoder setBytes:&r3   length:sizeof(r3)   atIndex:21];
                    [encoder setBytes:&idx  length:sizeof(idx)  atIndex:22];
                    // TODO: how to make this an array? read Metal docs
                    for (int j = 0; j < 8; ++j) {
                        // NOTE: this is done like this to avoid uninitialized kernel arguments when n_as < 8
                        struct ggml_tensor * src_cur = dst->src[2 + (j % n_as)];

                        size_t offs_src_cur = 0;
                        id<MTLBuffer> id_src_cur = ggml_metal_get_buffer(src_cur, &offs_src_cur);

                        [encoder setBuffer:id_src_cur offset:offs_src_cur atIndex:23 + j];
                    }

                    if (src2t == GGML_TYPE_Q4_0 || src2t == GGML_TYPE_Q4_1 ||
                        src2t == GGML_TYPE_Q5_0 || src2t == GGML_TYPE_Q5_1 || src2t == GGML_TYPE_Q8_0 ||
                        src2t == GGML_TYPE_Q2_K || src2t == GGML_TYPE_IQ1_S) { // || src2t == GGML_TYPE_Q4_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_IQ2_XXS || src2t == GGML_TYPE_IQ2_XS) {
                        const int mem_size = src2t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_IQ3_XXS) {
                        const int mem_size = 256*4+128;
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 7)/8, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_IQ4_NL) {
                        const int mem_size = 32*sizeof(float);
                        [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_Q4_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                    }
                    else if (src2t == GGML_TYPE_Q5_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 3)/4, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                    else if (src2t == GGML_TYPE_Q6_K) {
                        [encoder dispatchThreadgroups:MTLSizeMake((ne21 + 1)/2, _ne1, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    } else {
                        const int64_t ny = (_ne1 + nrows - 1)/nrows;
                        [encoder dispatchThreadgroups:MTLSizeMake(ne21, ny, ne01*ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                    }
                }
            } break;
        case GGML_OP_GET_ROWS:
            {
                id<MTLComputePipelineState> pipeline = nil;

                switch (src0->type) {
                    case GGML_TYPE_F32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F32    ].pipeline; break;
                    case GGML_TYPE_F16:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_F16    ].pipeline; break;
                    case GGML_TYPE_Q4_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_0   ].pipeline; break;
                    case GGML_TYPE_Q4_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_1   ].pipeline; break;
                    case GGML_TYPE_Q5_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_0   ].pipeline; break;
                    case GGML_TYPE_Q5_1:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_1   ].pipeline; break;
                    case GGML_TYPE_Q8_0:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q8_0   ].pipeline; break;
                    case GGML_TYPE_Q2_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q2_K   ].pipeline; break;
                    case GGML_TYPE_Q3_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q3_K   ].pipeline; break;
                    case GGML_TYPE_Q4_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q4_K   ].pipeline; break;
                    case GGML_TYPE_Q5_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q5_K   ].pipeline; break;
                    case GGML_TYPE_Q6_K:    pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_Q6_K   ].pipeline; break;
                    case GGML_TYPE_IQ2_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XXS].pipeline; break;
                    case GGML_TYPE_IQ2_XS:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_XS ].pipeline; break;
                    case GGML_TYPE_IQ3_XXS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_XXS].pipeline; break;
                    case GGML_TYPE_IQ1_S:   pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ1_S  ].pipeline; break;
                    case GGML_TYPE_IQ4_NL:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
                    case GGML_TYPE_I32:     pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_I32    ].pipeline; break;
                    default: GGML_ASSERT(false && "not implemented");
                }

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:5];
                [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:6];
                [encoder setBytes:&nb10 length:sizeof( int64_t) atIndex:7];
                [encoder setBytes:&nb11 length:sizeof( int64_t) atIndex:8];
                [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:9];
                [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:10];
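                // one 32-thread threadgroup (a single SIMD group) per (row index, batch) pair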
                [encoder dispatchThreadgroups:MTLSizeMake(ne10, ne11, 1) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
            } break;
        case GGML_OP_RMS_NORM:
            {
                GGML_ASSERT(ne00 % 4 == 0);

                float eps;
                memcpy(&eps, dst->op_params, sizeof(float));

                int nth = 32; // SIMD width

                while (nth < ne00/4 && nth < 1024) {
                    nth *= 2;
                }
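                // nth is now the smallest power of two that covers the float4 row
                // (ne00/4 lanes), capped at 1024 threads per threadgroup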
                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_RMS_NORM].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

                const int64_t nrows = ggml_nrows(src0);

                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_GROUP_NORM:
            {
                GGML_ASSERT(ne00 % 4 == 0);

                //float eps;
                //memcpy(&eps, dst->op_params, sizeof(float));

                const float eps = 1e-6f; // TODO: temporarily hardcoded

                const int32_t n_groups = ((int32_t *) dst->op_params)[0];

                int nth = 32; // SIMD width

                //while (nth < ne00/4 && nth < 1024) {
                //    nth *= 2;
                //}

                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GROUP_NORM].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:5];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:6];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:7];
                [encoder setBytes:&n_groups length:sizeof( int32_t) atIndex:8];
                [encoder setBytes:&eps      length:sizeof(   float) atIndex:9];
                [encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];

                [encoder dispatchThreadgroups:MTLSizeMake(n_groups, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_NORM:
            {
                float eps;
                memcpy(&eps, dst->op_params, sizeof(float));

                const int nth = MIN(256, ne00);

                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_NORM].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
                [encoder setThreadgroupMemoryLength:GGML_PAD(nth*sizeof(float), 16) atIndex:0];

                const int64_t nrows = ggml_nrows(src0);

                [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_ALIBI:
            {
                GGML_ASSERT((src0t == GGML_TYPE_F32));

                const int nth = MIN(1024, ne00);

                //const int n_past = ((int32_t *) dst->op_params)[0];
                const int n_head = ((int32_t *) dst->op_params)[1];

                float max_bias;
                memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

                const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));

                const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
                const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
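                // same ALiBi slope bases as in GGML_OP_SOFT_MAX: heads below the
                // power-of-two boundary use powers of m0, the rest odd powers of m1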
                id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];
                [encoder setBytes:&m0   length:sizeof(   float) atIndex:18];
                [encoder setBytes:&m1   length:sizeof(   float) atIndex:19];
                [encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];

                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_ROPE:
            {
                GGML_ASSERT(ne10 == ne02);

                const int nth = MIN(1024, ne00);

                const int n_past     = ((int32_t *) dst->op_params)[0];
                const int n_dims     = ((int32_t *) dst->op_params)[1];
                const int mode       = ((int32_t *) dst->op_params)[2];
                // skip param 3: n_ctx, used only by GLM RoPE, which is not implemented in Metal
                const int n_orig_ctx = ((int32_t *) dst->op_params)[4];

                float freq_base, freq_scale, ext_factor, attn_factor, beta_fast, beta_slow;
                memcpy(&freq_base,   (int32_t *) dst->op_params +  5, sizeof(float));
                memcpy(&freq_scale,  (int32_t *) dst->op_params +  6, sizeof(float));
                memcpy(&ext_factor,  (int32_t *) dst->op_params +  7, sizeof(float));
                memcpy(&attn_factor, (int32_t *) dst->op_params +  8, sizeof(float));
                memcpy(&beta_fast,   (int32_t *) dst->op_params +  9, sizeof(float));
                memcpy(&beta_slow,   (int32_t *) dst->op_params + 10, sizeof(float));

                id<MTLComputePipelineState> pipeline = nil;

                switch (src0->type) {
                    case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F32].pipeline; break;
                    case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ROPE_F16].pipeline; break;
                    default: GGML_ASSERT(false);
                };

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
                [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:4];
                [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:5];
                [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:6];
                [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:7];
                [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8];
                [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9];
                [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10];
                [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:11];
                [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:12];
                [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:13];
                [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:14];
                [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:15];
                [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:16];
                [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:17];
                [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:18];
                [encoder setBytes:&n_past     length:sizeof(   int) atIndex:19];
                [encoder setBytes:&n_dims     length:sizeof(   int) atIndex:20];
                [encoder setBytes:&mode       length:sizeof(   int) atIndex:21];
                [encoder setBytes:&n_orig_ctx length:sizeof(   int) atIndex:22];
                [encoder setBytes:&freq_base  length:sizeof( float) atIndex:23];
                [encoder setBytes:&freq_scale length:sizeof( float) atIndex:24];
                [encoder setBytes:&ext_factor length:sizeof( float) atIndex:25];
                [encoder setBytes:&attn_factor length:sizeof(float) atIndex:26];
                [encoder setBytes:&beta_fast  length:sizeof( float) atIndex:27];
                [encoder setBytes:&beta_slow  length:sizeof( float) atIndex:28];

                [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
        case GGML_OP_IM2COL:
            {
                GGML_ASSERT(src0->type == GGML_TYPE_F16);
                GGML_ASSERT(src1->type == GGML_TYPE_F32);
                GGML_ASSERT( dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);

                const int32_t s0 = ((const int32_t *)(dst->op_params))[0];
                const int32_t s1 = ((const int32_t *)(dst->op_params))[1];
                const int32_t p0 = ((const int32_t *)(dst->op_params))[2];
                const int32_t p1 = ((const int32_t *)(dst->op_params))[3];
                const int32_t d0 = ((const int32_t *)(dst->op_params))[4];
                const int32_t d1 = ((const int32_t *)(dst->op_params))[5];

                const bool is_2D = ((const int32_t *)(dst->op_params))[6] == 1;

                const int32_t N  = src1->ne[is_2D ? 3 : 2];
                const int32_t IC = src1->ne[is_2D ? 2 : 1];
                const int32_t IH = is_2D ? src1->ne[1] : 1;
                const int32_t IW =         src1->ne[0];

                const int32_t KH = is_2D ? src0->ne[1] : 1;
                const int32_t KW =         src0->ne[0];

                const int32_t OH = is_2D ? dst->ne[2] : 1;
                const int32_t OW =         dst->ne[1];

                const int32_t CHW = IC * KH * KW;

                const int32_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4;
                const int32_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4;
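                // ofs0/ofs1 are the per-batch and per-channel offsets of src1 in
                // elements (byte strides divided by 4, since src1 is F32)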
                id<MTLComputePipelineState> pipeline = nil;

                switch (dst->type) {
                    case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F32].pipeline; break;
                    case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_IM2COL_F16].pipeline; break;
                    default: GGML_ASSERT(false);
                };

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ofs0 length:sizeof( int32_t) atIndex:2];
                [encoder setBytes:&ofs1 length:sizeof( int32_t) atIndex:3];
                [encoder setBytes:&IW   length:sizeof( int32_t) atIndex:4];
                [encoder setBytes:&IH   length:sizeof( int32_t) atIndex:5];
                [encoder setBytes:&CHW  length:sizeof( int32_t) atIndex:6];
                [encoder setBytes:&s0   length:sizeof( int32_t) atIndex:7];
                [encoder setBytes:&s1   length:sizeof( int32_t) atIndex:8];
                [encoder setBytes:&p0   length:sizeof( int32_t) atIndex:9];
                [encoder setBytes:&p1   length:sizeof( int32_t) atIndex:10];
                [encoder setBytes:&d0   length:sizeof( int32_t) atIndex:11];
                [encoder setBytes:&d1   length:sizeof( int32_t) atIndex:12];

                [encoder dispatchThreadgroups:MTLSizeMake(IC, OH, OW) threadsPerThreadgroup:MTLSizeMake(N, KH, KW)];
            } break;
        case GGML_OP_UPSCALE:
            {
                GGML_ASSERT(src0->type == GGML_TYPE_F32);

                const int sf = dst->op_params[0];

                const id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline;

                [encoder setComputePipelineState:pipeline];
                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:10];
                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:11];
                [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:12];
                [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:13];
                [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:14];
                [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:15];
                [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:16];
                [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:17];
                [encoder setBytes:&sf   length:sizeof(sf)   atIndex:18];

                const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0);

                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
            } break;
                case GGML_OP_PAD:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_PAD_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:9];
                        [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:10];
                        [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:11];
                        [encoder setBytes:&ne2  length:sizeof(ne2)  atIndex:12];
                        [encoder setBytes:&ne3  length:sizeof(ne3)  atIndex:13];
                        [encoder setBytes:&nb0  length:sizeof(nb0)  atIndex:14];
                        [encoder setBytes:&nb1  length:sizeof(nb1)  atIndex:15];
                        [encoder setBytes:&nb2  length:sizeof(nb2)  atIndex:16];
                        [encoder setBytes:&nb3  length:sizeof(nb3)  atIndex:17];

                        const int nth = MIN(1024, ne0);

                        [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
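                // argsort dispatches one threadgroup per row with ne00 threads, i.e. each row
                // is sorted entirely within a single threadgroup (a threadgroup-wide bitonic
                // sort), so ne00 must not exceed the maximum threadgroup size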
                case GGML_OP_ARGSORT:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);
                        GGML_ASSERT( dst->type == GGML_TYPE_I32);

                        const int nrows = ggml_nrows(src0);

                        enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (order) {
                            case GGML_SORT_ASC:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC].pipeline;  break;
                            case GGML_SORT_DESC: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC].pipeline; break;
                            default: GGML_ASSERT(false);
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];

                        [encoder dispatchThreadgroups:MTLSizeMake(1, nrows, 1) threadsPerThreadgroup:MTLSizeMake(ne00, 1, 1)];
                    } break;
                case GGML_OP_LEAKY_RELU:
                    {
                        GGML_ASSERT(src0->type == GGML_TYPE_F32);

                        float slope;
                        memcpy(&slope, dst->op_params, sizeof(float));

                        id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32].pipeline;

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&slope length:sizeof(slope) atIndex:2];

                        const int64_t n = ggml_nelements(dst);

                        [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                    } break;
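                // DUP, CPY and CONT all map to the same family of copy kernels; the kernel is
                // selected by the (src, dst) type pair, which also covers casting between
                // F32/F16 and quantizing F32 to Q8_0/Q4_0/Q4_1 on the fly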
                case GGML_OP_DUP:
                case GGML_OP_CPY:
                case GGML_OP_CONT:
                    {
                        GGML_ASSERT(ne00 % ggml_blck_size(src0->type) == 0);

                        int nth = MIN(1024, ne00/ggml_blck_size(src0->type));

                        id<MTLComputePipelineState> pipeline = nil;

                        switch (src0t) {
                            case GGML_TYPE_F32:
                                {
                                    GGML_ASSERT(ne0 % ggml_blck_size(dst->type) == 0);

                                    switch (dstt) {
                                        case GGML_TYPE_F16:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F16].pipeline;  break;
                                        case GGML_TYPE_F32:  pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_F32].pipeline;  break;
                                        case GGML_TYPE_Q8_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0].pipeline; break;
                                        case GGML_TYPE_Q4_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_0].pipeline; break;
                                        case GGML_TYPE_Q4_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q4_1].pipeline; break;
                                      //case GGML_TYPE_Q5_0: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_0].pipeline; break;
                                      //case GGML_TYPE_Q5_1: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F32_Q5_1].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    }
                                } break;
                            case GGML_TYPE_F16:
                                {
                                    switch (dstt) {
                                        case GGML_TYPE_F16: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F16].pipeline; break;
                                        case GGML_TYPE_F32: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_CPY_F16_F32].pipeline; break;
                                        default: GGML_ASSERT(false && "not implemented");
                                    }
                                } break;
                            default: GGML_ASSERT(false && "not implemented");
                        }

                        [encoder setComputePipelineState:pipeline];
                        [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                        [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                        [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                        [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                        [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                        [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                        [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                        [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                        [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                        [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                        [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                        [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                        [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                        [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                        [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                        [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                        [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                        [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];

                        [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                    } break;
                default:
                    {
                        GGML_METAL_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                        GGML_ASSERT(false);
                    }
            }

            if (should_capture) {
                [encoder popDebugGroup];
            }
        }

        [encoder endEncoding];

        [command_buffer commit];
    });
    // wait for completion and check the status of each command buffer
    // needed to detect if the device ran out-of-memory, for example (#1881)
    for (int i = 0; i < n_cb; ++i) {
        id<MTLCommandBuffer> command_buffer = command_buffers[i];
        [command_buffer waitUntilCompleted];

        MTLCommandBufferStatus status = [command_buffer status];
        if (status != MTLCommandBufferStatusCompleted) {
            GGML_METAL_LOG_ERROR("%s: command buffer %d failed with status %lu\n", __func__, i, status);
            return false;
        }
    }
    if (should_capture) {
        [[MTLCaptureManager sharedCaptureManager] stopCapture];
    }
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////

// backend interface

// default buffer
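// the MTLDevice is shared between all Metal backends and buffers, and is
// reference-counted so that it is released only when the last user frees it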
static id<MTLDevice> g_backend_device = nil;
static int g_backend_device_ref_count = 0;

static id<MTLDevice> ggml_backend_metal_get_device(void) {
    if (g_backend_device == nil) {
        g_backend_device = MTLCreateSystemDefaultDevice();
    }

    g_backend_device_ref_count++;

    return g_backend_device;
}

static void ggml_backend_metal_free_device(void) {
    assert(g_backend_device_ref_count > 0);

    g_backend_device_ref_count--;

    if (g_backend_device_ref_count == 0) {
        [g_backend_device release];
        g_backend_device = nil;
    }
}
GGML_CALL static const char * ggml_backend_metal_buffer_get_name(ggml_backend_buffer_t buffer) {
    return "Metal";

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    for (int i = 0; i < ctx->n_buffers; i++) {
        [ctx->buffers[i].metal release];
    }
    ggml_backend_metal_free_device();

    if (ctx->owned) {
        free(ctx->all_data);
    }

    free(ctx);
}

GGML_CALL static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    return ctx->all_data;
}

GGML_CALL static void ggml_backend_metal_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    UNUSED(buffer);
}

GGML_CALL static bool ggml_backend_metal_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
    if (ggml_backend_buffer_is_host(src->buffer)) {
        memcpy(dst->data, src->data, ggml_nbytes(src));
        return true;
    }
    return false;

    UNUSED(buffer);
}

GGML_CALL static void ggml_backend_metal_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    struct ggml_backend_metal_buffer_context * ctx = (struct ggml_backend_metal_buffer_context *)buffer->context;

    memset(ctx->all_data, value, ctx->all_size);
}
static struct ggml_backend_buffer_i ggml_backend_metal_buffer_i = {
    /* .get_name    = */ ggml_backend_metal_buffer_get_name,
    /* .free_buffer = */ ggml_backend_metal_buffer_free_buffer,
    /* .get_base    = */ ggml_backend_metal_buffer_get_base,
    /* .init_tensor = */ NULL,
    /* .set_tensor  = */ ggml_backend_metal_buffer_set_tensor,
    /* .get_tensor  = */ ggml_backend_metal_buffer_get_tensor,
    /* .cpy_tensor  = */ ggml_backend_metal_buffer_cpy_tensor,
    /* .clear       = */ ggml_backend_metal_buffer_clear,
    /* .reset       = */ NULL,
};
// default buffer type

GGML_CALL static const char * ggml_backend_metal_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return "Metal";

    UNUSED(buft);
}

static void ggml_backend_metal_log_allocated_size(id<MTLDevice> device) {
#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
    if (@available(macOS 10.12, iOS 16.0, *)) {
        GGML_METAL_LOG_INFO(", (%8.2f / %8.2f)",
                device.currentAllocatedSize / 1024.0 / 1024.0,
                device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
            GGML_METAL_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
        } else {
            GGML_METAL_LOG_INFO("\n");
        }
    } else {
        GGML_METAL_LOG_INFO(", (%8.2f)\n", device.currentAllocatedSize / 1024.0 / 1024.0);
    }
#endif
    UNUSED(device);
}
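
// note: newBufferWithBytesNoCopy expects page-aligned storage, which is why the
// requested size is rounded up to a whole number of pages below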
GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    const size_t size_page = sysconf(_SC_PAGESIZE);

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }

    id<MTLDevice> device = ggml_backend_metal_get_device();

    ctx->all_data  = ggml_metal_host_malloc(size_aligned);
    ctx->all_size  = size_aligned;
    ctx->owned     = true;
    ctx->n_buffers = 1;

    ctx->buffers[0].data = ctx->all_data;
    ctx->buffers[0].size = size;
    ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
                                                      length:size_aligned
                                                     options:MTLResourceStorageModeShared
                                                 deallocator:nil];

    if (ctx->buffers[0].metal == nil) {
        GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
        free(ctx);
        ggml_backend_metal_free_device();
        return NULL;
    }

    GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);
    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(buft, ggml_backend_metal_buffer_i, ctx, size);
}
GGML_CALL static size_t ggml_backend_metal_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 32;

    UNUSED(buft);
}

GGML_CALL static size_t ggml_backend_metal_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
    id<MTLDevice> device = ggml_backend_metal_get_device();
    size_t max_size = device.maxBufferLength;
    ggml_backend_metal_free_device();

    return max_size;

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
    return ggml_backend_is_metal(backend) || ggml_backend_is_cpu(backend);

    UNUSED(buft);
}

GGML_CALL static bool ggml_backend_metal_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    return true;

    UNUSED(buft);
}

GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void) {
    static struct ggml_backend_buffer_type ggml_backend_buffer_type_metal = {
        /* .iface = */ {
            /* .get_name         = */ ggml_backend_metal_buffer_type_get_name,
            /* .alloc_buffer     = */ ggml_backend_metal_buffer_type_alloc_buffer,
            /* .get_alignment    = */ ggml_backend_metal_buffer_type_get_alignment,
            /* .get_max_size     = */ ggml_backend_metal_buffer_type_get_max_size,
            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
            /* .supports_backend = */ ggml_backend_metal_buffer_type_supports_backend,
            /* .is_host          = */ ggml_backend_metal_buffer_type_is_host,
        },
        /* .context = */ NULL,
    };

    return &ggml_backend_buffer_type_metal;
}

// buffer from ptr
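// wraps an existing host allocation (e.g. an mmap-ed model file) in a Metal buffer
// without copying; if the region exceeds the device's maxBufferLength it is split
// into multiple overlapping views, sized so that the largest tensor always fits
// entirely inside at least one view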
GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data, size_t size, size_t max_size) {
    struct ggml_backend_metal_buffer_context * ctx = malloc(sizeof(struct ggml_backend_metal_buffer_context));

    ctx->all_data  = data;
    ctx->all_size  = size;
    ctx->owned     = false;
    ctx->n_buffers = 0;

    const size_t size_page = sysconf(_SC_PAGESIZE);

    // page-align the data ptr
    {
        const uintptr_t offs = (uintptr_t) data % size_page;
        data  = (void *) ((char *) data - offs);
        size += offs;
    }

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += (size_page - (size_aligned % size_page));
    }

    id<MTLDevice> device = ggml_backend_metal_get_device();

    // the buffer fits into the max buffer size allowed by the device
    if (size_aligned <= device.maxBufferLength) {
        ctx->buffers[ctx->n_buffers].data = data;
        ctx->buffers[ctx->n_buffers].size = size;

        ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

        if (ctx->buffers[ctx->n_buffers].metal == nil) {
            GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
            free(ctx);
            return NULL;
        }

        GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB", __func__, size_aligned / 1024.0 / 1024.0);

        ++ctx->n_buffers;
    } else {
        // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
        // one of the views
        const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
        const size_t size_step = device.maxBufferLength - size_ovlp;
        const size_t size_view = device.maxBufferLength;

        for (size_t i = 0; i < size; i += size_step) {
            const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

            ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
            ctx->buffers[ctx->n_buffers].size = size_step_aligned;

            ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
                free(ctx);
                return NULL;
            }

            GGML_METAL_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, offs = %12ld", __func__, size_step_aligned / 1024.0 / 1024.0, i);
            if (i + size_step < size) {
                GGML_METAL_LOG_INFO("\n");
            }

            ++ctx->n_buffers;
        }
    }

    ggml_backend_metal_log_allocated_size(device);

    return ggml_backend_buffer_init(ggml_backend_metal_buffer_type(), ggml_backend_metal_buffer_i, ctx, size);
}
// backend

GGML_CALL static const char * ggml_backend_metal_name(ggml_backend_t backend) {
    return "Metal";

    UNUSED(backend);
}

GGML_CALL static void ggml_backend_metal_free(ggml_backend_t backend) {
    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ggml_metal_free(ctx);
    free(backend);
}

GGML_CALL static ggml_backend_buffer_type_t ggml_backend_metal_get_default_buffer_type(ggml_backend_t backend) {
    return ggml_backend_metal_buffer_type();

    UNUSED(backend);
}

GGML_CALL static bool ggml_backend_metal_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_graph_compute(metal_ctx, cgraph);
}

GGML_CALL static bool ggml_backend_metal_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    struct ggml_metal_context * metal_ctx = (struct ggml_metal_context *)backend->context;

    return ggml_metal_supports_op(metal_ctx, op);
}

static struct ggml_backend_i ggml_backend_metal_i = {
    /* .get_name                = */ ggml_backend_metal_name,
    /* .free                    = */ ggml_backend_metal_free,
    /* .get_default_buffer_type = */ ggml_backend_metal_get_default_buffer_type,
    /* .set_tensor_async        = */ NULL,
    /* .get_tensor_async        = */ NULL,
    /* .cpy_tensor_async        = */ NULL,
    /* .synchronize             = */ NULL,
    /* .graph_plan_create       = */ NULL,
    /* .graph_plan_free         = */ NULL,
    /* .graph_plan_compute      = */ NULL,
    /* .graph_compute           = */ ggml_backend_metal_graph_compute,
    /* .supports_op             = */ ggml_backend_metal_supports_op,
};

void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
    ggml_metal_log_callback  = log_callback;
    ggml_metal_log_user_data = user_data;
}
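
// typical usage of the Metal backend (a sketch, assuming the generic ggml-backend
// API; graph construction and error handling omitted):
//
//     ggml_backend_t backend = ggml_backend_metal_init();
//     if (backend != NULL) {
//         ggml_backend_metal_set_n_cb(backend, 4); // optional: split work across 4 command buffers
//         ...
//         ggml_backend_free(backend);              // calls ggml_backend_metal_free
//     }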
ggml_backend_t ggml_backend_metal_init(void) {
    struct ggml_metal_context * ctx = ggml_metal_init(GGML_DEFAULT_N_THREADS);

    if (ctx == NULL) {
        return NULL;
    }

    ggml_backend_t metal_backend = malloc(sizeof(struct ggml_backend));

    *metal_backend = (struct ggml_backend) {
        /* .interface = */ ggml_backend_metal_i,
        /* .context   = */ ctx,
    };

    return metal_backend;
}

bool ggml_backend_is_metal(ggml_backend_t backend) {
    return backend && backend->iface.get_name == ggml_backend_metal_name;
}

void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
}
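
// family is a 1-based index into the MTLGPUFamilyApple enum, i.e. family == 7
// checks for MTLGPUFamilyApple7 (the Apple M1 / A14 GPU generation)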
bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;

    return [ctx->device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
}

void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
    GGML_ASSERT(ggml_backend_is_metal(backend));

    struct ggml_metal_context * ctx = (struct ggml_metal_context *)backend->context;
    ctx->should_capture_next_compute = true;
}

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data); // silence warning

GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data) {
    return ggml_backend_metal_init();

    GGML_UNUSED(params);
    GGML_UNUSED(user_data);
}