// ggml-metal.m
#import "ggml-metal.h"

#import "ggml.h"

#import <Foundation/Foundation.h>

#import <Metal/Metal.h>

#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

// TODO: temporary - reuse llama.cpp logging
#ifdef GGML_METAL_NDEBUG
#define metal_printf(...)
#else
#define metal_printf(...) fprintf(stderr, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

#define GGML_MAX_CONCUR (2*GGML_MAX_NODES)
struct ggml_metal_buffer {
    const char * name;

    void   * data;
    size_t   size;

    id<MTLBuffer> metal;
};
struct ggml_metal_context {
    int n_cb;

    id<MTLDevice>       device;
    id<MTLCommandQueue> queue;
    id<MTLLibrary>      library;

    id<MTLCommandBuffer>         command_buffers [GGML_METAL_MAX_COMMAND_BUFFERS];
    id<MTLComputeCommandEncoder> command_encoders[GGML_METAL_MAX_COMMAND_BUFFERS];

    dispatch_queue_t d_queue;

    int n_buffers;
    struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];

    int concur_list[GGML_MAX_CONCUR];
    int concur_list_len;

    // custom kernels
#define GGML_METAL_DECL_KERNEL(name) \
    id<MTLFunction>             function_##name; \
    id<MTLComputePipelineState> pipeline_##name
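    // for reference, GGML_METAL_DECL_KERNEL(add) expands to:
    //
    //   id<MTLFunction>             function_add;
    //   id<MTLComputePipelineState> pipeline_add;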
    GGML_METAL_DECL_KERNEL(add);
    GGML_METAL_DECL_KERNEL(add_row); // TODO: avoid this extra kernel, instead extend the "add" kernel to support broadcast
    GGML_METAL_DECL_KERNEL(mul);
    GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast
    GGML_METAL_DECL_KERNEL(scale);
    GGML_METAL_DECL_KERNEL(silu);
    GGML_METAL_DECL_KERNEL(relu);
    GGML_METAL_DECL_KERNEL(gelu);
    GGML_METAL_DECL_KERNEL(soft_max);
    GGML_METAL_DECL_KERNEL(soft_max_4);
    GGML_METAL_DECL_KERNEL(diag_mask_inf);
    GGML_METAL_DECL_KERNEL(diag_mask_inf_8);
    GGML_METAL_DECL_KERNEL(get_rows_f32);
    GGML_METAL_DECL_KERNEL(get_rows_f16);
    GGML_METAL_DECL_KERNEL(get_rows_q4_0);
    GGML_METAL_DECL_KERNEL(get_rows_q4_1);
    GGML_METAL_DECL_KERNEL(get_rows_q8_0);
    GGML_METAL_DECL_KERNEL(get_rows_q2_K);
    GGML_METAL_DECL_KERNEL(get_rows_q3_K);
    GGML_METAL_DECL_KERNEL(get_rows_q4_K);
    GGML_METAL_DECL_KERNEL(get_rows_q5_K);
    GGML_METAL_DECL_KERNEL(get_rows_q6_K);
    GGML_METAL_DECL_KERNEL(rms_norm);
    GGML_METAL_DECL_KERNEL(norm);
    GGML_METAL_DECL_KERNEL(mul_mat_f32_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_1row);
    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_l4);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q8_0_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q2_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q3_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_f32_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q4_1_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q8_0_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q2_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q3_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q4_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q5_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mm_q6_K_f32);
    GGML_METAL_DECL_KERNEL(rope);
    GGML_METAL_DECL_KERNEL(alibi_f32);
    GGML_METAL_DECL_KERNEL(cpy_f32_f16);
    GGML_METAL_DECL_KERNEL(cpy_f32_f32);
    GGML_METAL_DECL_KERNEL(cpy_f16_f16);
#undef GGML_METAL_DECL_KERNEL
};
// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end
struct ggml_metal_context * ggml_metal_init(int n_cb) {
    metal_printf("%s: allocating\n", __func__);

    id<MTLDevice> device;
    NSString * s;

#if TARGET_OS_OSX
    // Show all the Metal device instances in the system
    NSArray * devices = MTLCopyAllDevices();
    for (device in devices) {
        s = [device name];
        metal_printf("%s: found device: %s\n", __func__, [s UTF8String]);
    }
#endif

    // Pick and show default Metal device
    device = MTLCreateSystemDefaultDevice();
    s = [device name];
    metal_printf("%s: picking default device: %s\n", __func__, [s UTF8String]);

    // Configure context
    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));
    ctx->device = device;
    // clamp to the size of the command buffer/encoder arrays
    ctx->n_cb   = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
    ctx->queue  = [ctx->device newCommandQueue];
    ctx->n_buffers = 0;
    ctx->concur_list_len = 0;
    ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
#ifdef GGML_SWIFT
    // load the default.metallib file
    {
        NSError * error = nil;

        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
        NSString * llamaBundlePath = [bundle pathForResource:@"llama_llama" ofType:@"bundle"];
        NSBundle * llamaBundle = [NSBundle bundleWithPath:llamaBundlePath];
        NSString * libPath = [llamaBundle pathForResource:@"default" ofType:@"metallib"];
        NSURL * libURL = [NSURL fileURLWithPath:libPath];

        // Load the metallib file into a Metal library
        ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];

        if (error) {
            metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
            return NULL;
        }
    }
#else
    UNUSED(msl_library_source);

    // read the source from "ggml-metal.metal" into a string and use newLibraryWithSource
    {
        NSError * error = nil;

        //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
        NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
        metal_printf("%s: loading '%s'\n", __func__, [path UTF8String]);

        NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
        if (error) {
            metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
            return NULL;
        }

#ifdef GGML_QKK_64
        MTLCompileOptions * options = [MTLCompileOptions new];
        options.preprocessorMacros = @{ @"QK_K" : @(64) };
        ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
#else
        ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
#endif
        if (error) {
            metal_printf("%s: error: %s\n", __func__, [[error description] UTF8String]);
            return NULL;
        }
    }
#endif
    // load kernels
    {
        NSError * error = nil;
#define GGML_METAL_ADD_KERNEL(name) \
        ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
        ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
        metal_printf("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
                (int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
                (int) ctx->pipeline_##name.threadExecutionWidth); \
        if (error) { \
            metal_printf("%s: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
            return NULL; \
        }

        GGML_METAL_ADD_KERNEL(add);
        GGML_METAL_ADD_KERNEL(add_row);
        GGML_METAL_ADD_KERNEL(mul);
        GGML_METAL_ADD_KERNEL(mul_row);
        GGML_METAL_ADD_KERNEL(scale);
        GGML_METAL_ADD_KERNEL(silu);
        GGML_METAL_ADD_KERNEL(relu);
        GGML_METAL_ADD_KERNEL(gelu);
        GGML_METAL_ADD_KERNEL(soft_max);
        GGML_METAL_ADD_KERNEL(soft_max_4);
        GGML_METAL_ADD_KERNEL(diag_mask_inf);
        GGML_METAL_ADD_KERNEL(diag_mask_inf_8);
        GGML_METAL_ADD_KERNEL(get_rows_f32);
        GGML_METAL_ADD_KERNEL(get_rows_f16);
        GGML_METAL_ADD_KERNEL(get_rows_q4_0);
        GGML_METAL_ADD_KERNEL(get_rows_q4_1);
        GGML_METAL_ADD_KERNEL(get_rows_q8_0);
        GGML_METAL_ADD_KERNEL(get_rows_q2_K);
        GGML_METAL_ADD_KERNEL(get_rows_q3_K);
        GGML_METAL_ADD_KERNEL(get_rows_q4_K);
        GGML_METAL_ADD_KERNEL(get_rows_q5_K);
        GGML_METAL_ADD_KERNEL(get_rows_q6_K);
        GGML_METAL_ADD_KERNEL(rms_norm);
        GGML_METAL_ADD_KERNEL(norm);
        GGML_METAL_ADD_KERNEL(mul_mat_f32_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_1row);
        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_l4);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q8_0_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q2_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q3_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_f32_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q8_0_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q4_1_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q2_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q3_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
        GGML_METAL_ADD_KERNEL(rope);
        GGML_METAL_ADD_KERNEL(alibi_f32);
        GGML_METAL_ADD_KERNEL(cpy_f32_f16);
        GGML_METAL_ADD_KERNEL(cpy_f32_f32);
        GGML_METAL_ADD_KERNEL(cpy_f16_f16);
#undef GGML_METAL_ADD_KERNEL
    }
    metal_printf("%s: hasUnifiedMemory             = %s\n",       __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
#if TARGET_OS_OSX
    metal_printf("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
    if (ctx->device.maxTransferRate != 0) {
        metal_printf("%s: maxTransferRate              = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
    } else {
        metal_printf("%s: maxTransferRate              = built-in GPU\n", __func__);
    }
#endif

    return ctx;
}
void ggml_metal_free(struct ggml_metal_context * ctx) {
    metal_printf("%s: deallocating\n", __func__);
#define GGML_METAL_DEL_KERNEL(name) \
    [ctx->function_##name release]; \
    [ctx->pipeline_##name release];

    GGML_METAL_DEL_KERNEL(add);
    GGML_METAL_DEL_KERNEL(add_row);
    GGML_METAL_DEL_KERNEL(mul);
    GGML_METAL_DEL_KERNEL(mul_row);
    GGML_METAL_DEL_KERNEL(scale);
    GGML_METAL_DEL_KERNEL(silu);
    GGML_METAL_DEL_KERNEL(relu);
    GGML_METAL_DEL_KERNEL(gelu);
    GGML_METAL_DEL_KERNEL(soft_max);
    GGML_METAL_DEL_KERNEL(soft_max_4);
    GGML_METAL_DEL_KERNEL(diag_mask_inf);
    GGML_METAL_DEL_KERNEL(diag_mask_inf_8);
    GGML_METAL_DEL_KERNEL(get_rows_f32);
    GGML_METAL_DEL_KERNEL(get_rows_f16);
    GGML_METAL_DEL_KERNEL(get_rows_q4_0);
    GGML_METAL_DEL_KERNEL(get_rows_q4_1);
    GGML_METAL_DEL_KERNEL(get_rows_q8_0);
    GGML_METAL_DEL_KERNEL(get_rows_q2_K);
    GGML_METAL_DEL_KERNEL(get_rows_q3_K);
    GGML_METAL_DEL_KERNEL(get_rows_q4_K);
    GGML_METAL_DEL_KERNEL(get_rows_q5_K);
    GGML_METAL_DEL_KERNEL(get_rows_q6_K);
    GGML_METAL_DEL_KERNEL(rms_norm);
    GGML_METAL_DEL_KERNEL(norm);
    GGML_METAL_DEL_KERNEL(mul_mat_f32_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_1row);
    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_l4);
    GGML_METAL_DEL_KERNEL(mul_mat_q4_0_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q4_1_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q8_0_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q2_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q3_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q4_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q5_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mat_q6_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_f32_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q8_0_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q4_1_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q2_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q3_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
    GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
    GGML_METAL_DEL_KERNEL(rope);
    GGML_METAL_DEL_KERNEL(alibi_f32);
    GGML_METAL_DEL_KERNEL(cpy_f32_f16);
    GGML_METAL_DEL_KERNEL(cpy_f32_f32);
    GGML_METAL_DEL_KERNEL(cpy_f16_f16);
#undef GGML_METAL_DEL_KERNEL

    for (int i = 0; i < ctx->n_buffers; ++i) {
        [ctx->buffers[i].metal release];
    }

    [ctx->library release];
    [ctx->queue release];
    [ctx->device release];

    dispatch_release(ctx->d_queue);

    free(ctx);
}
void * ggml_metal_host_malloc(size_t n) {
    void * data = NULL;
    const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
    if (result != 0) {
        metal_printf("%s: error: posix_memalign failed\n", __func__);
        return NULL;
    }

    return data;
}

void ggml_metal_host_free(void * data) {
    free(data);
}
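// a minimal usage sketch for the host helpers (hypothetical `size`; error handling omitted):
//
//   void * data = ggml_metal_host_malloc(size); // page-aligned, as needed by newBufferWithBytesNoCopy
//   // ... fill `data` with tensor contents ...
//   ggml_metal_add_buffer(ctx, "data", data, size, 0); // map it for the GPU without copying
//   ...
//   ggml_metal_host_free(data);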
void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
    // clamp to the size of the command buffer/encoder arrays
    ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
}

int ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
    return ctx->concur_list_len;
}

int * ggml_metal_get_concur_list(struct ggml_metal_context * ctx) {
    return ctx->concur_list;
}
// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
    //metal_printf("%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    // find the view that contains the tensor fully
    for (int i = 0; i < ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;

        //metal_printf("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name);
        if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //metal_printf("%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);

            return ctx->buffers[i].metal;
        }
    }

    metal_printf("%s: error: buffer is nil\n", __func__);

    return nil;
}
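// for example (hypothetical addresses): if a buffer was registered with host pointer 0x1000 and size 4096,
// a tensor with t->data == 0x1400 and ggml_nbytes(t) == 512 resolves to that buffer with *offs == 0x400,
// since 0x400 + 512 <= 4096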
bool ggml_metal_add_buffer(
        struct ggml_metal_context * ctx,
        const char * name,
        void * data,
        size_t size,
        size_t max_size) {
    if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
        metal_printf("%s: too many buffers\n", __func__);
        return false;
    }

    if (data) {
        // verify that the buffer does not overlap with any of the existing buffers
        for (int i = 0; i < ctx->n_buffers; ++i) {
            const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data;

            if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
                metal_printf("%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
                return false;
            }
        }

        const size_t size_page = sysconf(_SC_PAGESIZE);

        // round up to a whole number of pages, as required by newBufferWithBytesNoCopy
        size_t size_aligned = size;
        if ((size_aligned % size_page) != 0) {
            size_aligned += (size_page - (size_aligned % size_page));
        }

        // the buffer fits into the max buffer size allowed by the device
        if (size_aligned <= ctx->device.maxBufferLength) {
            ctx->buffers[ctx->n_buffers].name = name;
            ctx->buffers[ctx->n_buffers].data = data;
            ctx->buffers[ctx->n_buffers].size = size;

            ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
                return false;
            }

            metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);

            ++ctx->n_buffers;
        } else {
            // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
            // one of the views
            const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
            const size_t size_step = ctx->device.maxBufferLength - size_ovlp;
            const size_t size_view = ctx->device.maxBufferLength;

            for (size_t i = 0; i < size; i += size_step) {
                const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

                ctx->buffers[ctx->n_buffers].name = name;
                ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
                ctx->buffers[ctx->n_buffers].size = size_step_aligned;

                ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

                if (ctx->buffers[ctx->n_buffers].metal == nil) {
                    metal_printf("%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
                    return false;
                }

                metal_printf("%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
                if (i + size_step < size) {
                    metal_printf("\n");
                }

                ++ctx->n_buffers;
            }
        }

#if TARGET_OS_OSX
        metal_printf(", (%8.2f / %8.2f)",
                ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
                ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
            metal_printf(", warning: current allocated size is greater than the recommended max working set size\n");
        } else {
            metal_printf("\n");
        }
#else
        metal_printf(", (%8.2f)\n", ctx->device.currentAllocatedSize / 1024.0 / 1024.0);
#endif
    }

    return true;
}
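// for illustration (hypothetical numbers): with maxBufferLength = 1 GiB, max_size = 128 MiB and 4 KiB pages,
// size_ovlp rounds max_size up by up to two extra pages, so consecutive views start size_step = 1 GiB - size_ovlp
// bytes apart and overlap by size_ovlp bytes - enough for the largest tensor to lie entirely within at least one view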
void ggml_metal_set_tensor(
        struct ggml_metal_context * ctx,
        struct ggml_tensor * t) {
    size_t offs;
    id<MTLBuffer> id_dst = ggml_metal_get_buffer(ctx, t, &offs);

    memcpy((void *) ((uint8_t *) id_dst.contents + offs), t->data, ggml_nbytes(t));
}

void ggml_metal_get_tensor(
        struct ggml_metal_context * ctx,
        struct ggml_tensor * t) {
    size_t offs;
    id<MTLBuffer> id_src = ggml_metal_get_buffer(ctx, t, &offs);

    memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), ggml_nbytes(t));
}
void ggml_metal_graph_find_concurrency(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf, bool check_mem) {
    int search_depth = gf->n_nodes; // we only look for concurrency within this range to avoid wasting too much time
    int nodes_unused[GGML_MAX_CONCUR];

    for (int i = 0; i < GGML_MAX_CONCUR; i++) { ctx->concur_list[i] = 0; }
    for (int i = 0; i < gf->n_nodes;     i++) { nodes_unused[i]     = 1; }
    ctx->concur_list_len = 0;

    int n_left    = gf->n_nodes;
    int n_start   = 0; // all nodes before n_start in the nodes_unused array have been sorted and stored back into ctx->concur_list
    int level_pos = 0; // in ctx->concur_list, the last layer (level) ends at level_pos

    while (n_left > 0) {
        // number of nodes at the current layer (i.e. nodes that can be issued concurrently)
        int concurrency = 0;
        for (int i = n_start; i < ((n_start + search_depth > gf->n_nodes) ? gf->n_nodes : n_start + search_depth); i++) {
            if (nodes_unused[i]) {
                // check if the requirements for gf->nodes[i] are satisfied
                int exe_flag = 1;

                // scan all srcs
                for (int src_ind = 0; src_ind < GGML_MAX_SRC; src_ind++) {
                    struct ggml_tensor * src_cur = gf->nodes[i]->src[src_ind];
                    if (src_cur) {
                        // if the src is a leaf node, it is satisfied
                        // TODO: ggml_is_leaf()
                        if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) {
                            continue;
                        }

                        // otherwise this src should be the output of a previous node
                        int is_found = 0;

                        // scan back 2*search_depth entries, because barriers have been inserted into the list
                        //for (int j = ((level_pos - 2*search_depth) < 0 ? 0 : (level_pos - 2*search_depth)); j < level_pos; j++) {
                        for (int j = MAX(0, level_pos - 2*search_depth); j < level_pos; j++) {
                            if (ctx->concur_list[j] >= 0 && gf->nodes[ctx->concur_list[j]] == src_cur) {
                                is_found = 1;
                                break;
                            }
                        }
                        if (is_found == 0) {
                            exe_flag = 0;
                            break;
                        }
                    }
                }
                if (exe_flag && check_mem) {
                    // check if nodes[i]'s data will be overwritten by a node before nodes[i].
                    // if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3]
                    int64_t data_start = (int64_t) gf->nodes[i]->data;
                    int64_t length     = (int64_t) ggml_nbytes(gf->nodes[i]);
                    for (int j = n_start; j < i; j++) {
                        if (nodes_unused[j] && gf->nodes[j]->op != GGML_OP_RESHAPE \
                                            && gf->nodes[j]->op != GGML_OP_VIEW \
                                            && gf->nodes[j]->op != GGML_OP_TRANSPOSE \
                                            && gf->nodes[j]->op != GGML_OP_PERMUTE) {
                            if (((int64_t)gf->nodes[j]->data) >= data_start + length || \
                                ((int64_t)gf->nodes[j]->data) + (int64_t) ggml_nbytes(gf->nodes[j]) <= data_start) {
                                continue;
                            }

                            exe_flag = 0;
                        }
                    }
                }
                if (exe_flag) {
                    ctx->concur_list[level_pos + concurrency] = i;
                    nodes_unused[i] = 0;
                    concurrency++;
                    ctx->concur_list_len++;
                }
            }
        }
        n_left -= concurrency;
        // add a barrier (-1) to separate this layer from the next
        ctx->concur_list[level_pos + concurrency] = -1;
        ctx->concur_list_len++;
        // skip past all nodes that have already been scheduled
        while (!nodes_unused[n_start]) {
            n_start++;
        }
        level_pos += concurrency + 1;
    }

    if (ctx->concur_list_len > GGML_MAX_CONCUR) {
        metal_printf("%s: too many elements for metal ctx->concur_list!\n", __func__);
    }
}
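// the resulting concur_list is a flat array of node indices, with -1 entries acting as layer barriers,
// e.g. (illustrative indices only):
//
//   concur_list = { 0, 1, 3, -1, 2, 4, -1, 5, -1 }
//
// meaning nodes {0, 1, 3} can be issued concurrently, then {2, 4}, then {5}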
void ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf) {
    @autoreleasepool {

    // if there is a ctx->concur_list, dispatch concurrently
    // otherwise fall back to serial dispatch
    MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;

    const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_CONCUR;

    const int n_nodes  = has_concur ? ctx->concur_list_len : gf->n_nodes;
    edesc.dispatchType = has_concur ? MTLDispatchTypeConcurrent : MTLDispatchTypeSerial;

    // create multiple command buffers and enqueue them
    // then, we encode the graph into the command buffers in parallel

    const int n_cb = ctx->n_cb;

    for (int i = 0; i < n_cb; ++i) {
        ctx->command_buffers[i] = [ctx->queue commandBuffer];

        // enqueue the command buffers in order to specify their execution order
        [ctx->command_buffers[i] enqueue];

        ctx->command_encoders[i] = [ctx->command_buffers[i] computeCommandEncoderWithDescriptor: edesc];
    }

    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;

        dispatch_async(ctx->d_queue, ^{
            size_t offs_src0 = 0;
            size_t offs_src1 = 0;
            size_t offs_dst  = 0;

            id<MTLCommandBuffer> command_buffer  = ctx->command_buffers[cb_idx];
            id<MTLComputeCommandEncoder> encoder = ctx->command_encoders[cb_idx];

            const int node_start = (cb_idx + 0) * n_nodes_per_cb;
            const int node_end   = MIN((cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb, n_nodes);

            for (int ind = node_start; ind < node_end; ++ind) {
                const int i = has_concur ? ctx->concur_list[ind] : ind;

                // a -1 entry in concur_list marks the boundary between two concurrency layers
                if (i == -1) {
                    [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                    continue;
                }
                //metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));

                struct ggml_tensor * src0 = gf->nodes[i]->src[0];
                struct ggml_tensor * src1 = gf->nodes[i]->src[1];
                struct ggml_tensor * dst  = gf->nodes[i];

                const int64_t  ne00 = src0 ? src0->ne[0] : 0;
                const int64_t  ne01 = src0 ? src0->ne[1] : 0;
                const int64_t  ne02 = src0 ? src0->ne[2] : 0;
                const int64_t  ne03 = src0 ? src0->ne[3] : 0;

                const uint64_t nb00 = src0 ? src0->nb[0] : 0;
                const uint64_t nb01 = src0 ? src0->nb[1] : 0;
                const uint64_t nb02 = src0 ? src0->nb[2] : 0;
                const uint64_t nb03 = src0 ? src0->nb[3] : 0;

                const int64_t  ne10 = src1 ? src1->ne[0] : 0;
                const int64_t  ne11 = src1 ? src1->ne[1] : 0;
                const int64_t  ne12 = src1 ? src1->ne[2] : 0;
                const int64_t  ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);

                const uint64_t nb10 = src1 ? src1->nb[0] : 0;
                const uint64_t nb11 = src1 ? src1->nb[1] : 0;
                const uint64_t nb12 = src1 ? src1->nb[2] : 0;
                const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);

                const int64_t  ne0 = dst ? dst->ne[0] : 0;
                const int64_t  ne1 = dst ? dst->ne[1] : 0;
                const int64_t  ne2 = dst ? dst->ne[2] : 0;
                const int64_t  ne3 = dst ? dst->ne[3] : 0;

                const uint64_t nb0 = dst ? dst->nb[0] : 0;
                const uint64_t nb1 = dst ? dst->nb[1] : 0;
                const uint64_t nb2 = dst ? dst->nb[2] : 0;
                const uint64_t nb3 = dst ? dst->nb[3] : 0;

                const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
                const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
                const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;

                id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(ctx, src0, &offs_src0) : nil;
                id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil;
                id<MTLBuffer> id_dst  = dst  ? ggml_metal_get_buffer(ctx, dst,  &offs_dst)  : nil;

                //metal_printf("%s: op - %s\n", __func__, ggml_op_name(dst->op));
                //if (src0) {
                //    metal_printf("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
                //            ggml_is_contiguous(src0), src0->name);
                //}
                //if (src1) {
                //    metal_printf("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
                //            ggml_is_contiguous(src1), src1->name);
                //}
                //if (dst) {
                //    metal_printf("%s: dst  - %4s [%5lld, %5lld, %5lld], 1, %s\n",  __func__, ggml_type_name(dstt), ne0, ne1, ne2,
                //            dst->name);
                //}
                switch (dst->op) {
                    case GGML_OP_NONE:
                    case GGML_OP_RESHAPE:
                    case GGML_OP_VIEW:
                    case GGML_OP_TRANSPOSE:
                    case GGML_OP_PERMUTE:
                        {
                            // noop
                        } break;
                    case GGML_OP_ADD:
                        {
                            GGML_ASSERT(ggml_is_contiguous(src0));
                            GGML_ASSERT(ggml_is_contiguous(src1));

                            // utilize float4
                            GGML_ASSERT(ne00 % 4 == 0);
                            const int64_t nb = ne00/4;

                            if (ggml_nelements(src1) == ne10) {
                                // src1 is a row
                                GGML_ASSERT(ne11 == 1);
                                [encoder setComputePipelineState:ctx->pipeline_add_row];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_add];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&nb length:sizeof(nb) atIndex:3];

                            const int64_t n = ggml_nelements(dst)/4;

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_MUL:
                        {
                            GGML_ASSERT(ggml_is_contiguous(src0));
                            GGML_ASSERT(ggml_is_contiguous(src1));

                            // utilize float4
                            GGML_ASSERT(ne00 % 4 == 0);
                            const int64_t nb = ne00/4;

                            if (ggml_nelements(src1) == ne10) {
                                // src1 is a row
                                GGML_ASSERT(ne11 == 1);
                                [encoder setComputePipelineState:ctx->pipeline_mul_row];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_mul];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&nb length:sizeof(nb) atIndex:3];

                            const int64_t n = ggml_nelements(dst)/4;

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_SCALE:
                        {
                            GGML_ASSERT(ggml_is_contiguous(src0));

                            const float scale = *(const float *) src1->data;

                            [encoder setComputePipelineState:ctx->pipeline_scale];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&scale length:sizeof(scale) atIndex:2];

                            const int64_t n = ggml_nelements(dst)/4;

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_UNARY:
                        switch (ggml_get_unary_op(gf->nodes[i])) {
                            case GGML_UNARY_OP_SILU:
                                {
                                    [encoder setComputePipelineState:ctx->pipeline_silu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst)/4;

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            case GGML_UNARY_OP_RELU:
                                {
                                    [encoder setComputePipelineState:ctx->pipeline_relu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst);

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            case GGML_UNARY_OP_GELU:
                                {
                                    [encoder setComputePipelineState:ctx->pipeline_gelu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst)/4;

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            default:
                                {
                                    metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                                    GGML_ASSERT(false);
                                }
                        } break;
                    case GGML_OP_SOFT_MAX:
                        {
                            const int nth = 32;

                            if (ne00%4 == 0) {
                                [encoder setComputePipelineState:ctx->pipeline_soft_max_4];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_soft_max];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_DIAG_MASK_INF:
                        {
                            const int n_past = ((int32_t *)(dst->op_params))[0];

                            if (ne00%8 == 0) {
                                [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf_8];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00   length:sizeof(ne00) atIndex:2];
                            [encoder setBytes:&ne01   length:sizeof(ne01) atIndex:3];
                            [encoder setBytes:&n_past length:sizeof(int)  atIndex:4];

                            if (ne00%8 == 0) {
                                [encoder dispatchThreadgroups:MTLSizeMake(ne00*ne01*ne02/8, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                            }
                            else {
                                [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                            }
                        } break;
                    case GGML_OP_MUL_MAT:
                        {
                            // TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224

                            GGML_ASSERT(ne00 == ne10);
                            // GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere
                            uint gqa = ne12/ne02;
                            GGML_ASSERT(ne03 == ne13);

                            // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
                            // AMD GPUs and older A-chips will reuse the matrix-vector multiplication kernel
                            if (!ggml_is_transposed(src0) &&
                                !ggml_is_transposed(src1) &&
                                src1t == GGML_TYPE_F32 &&
                                [ctx->device supportsFamily:MTLGPUFamilyApple7] &&
                                ne00%32 == 0 &&
                                ne11 > 1) {
                                switch (src0->type) {
                                    case GGML_TYPE_F32:  [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32];  break;
                                    case GGML_TYPE_F16:  [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32];  break;
                                    case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_0_f32]; break;
                                    case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_1_f32]; break;
                                    case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q8_0_f32]; break;
                                    case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q2_K_f32]; break;
                                    case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q3_K_f32]; break;
                                    case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q4_K_f32]; break;
                                    case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q5_K_f32]; break;
                                    case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_mul_mm_q6_K_f32]; break;
                                    default: GGML_ASSERT(false && "MUL MAT-MAT not implemented");
                                }
                                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:5];
                                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:6];
                                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:7];
                                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:8];
                                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:9];
                                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:10];
                                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:11];
                                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:12];
                                [encoder setBytes:&gqa  length:sizeof(gqa)  atIndex:13];
                                [encoder setThreadgroupMemoryLength:8192 atIndex:0];
                                [encoder dispatchThreadgroups:MTLSizeMake((ne11 + 31)/32, (ne01 + 63)/64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
                            } else {
                                int nth0 = 32;
                                int nth1 = 1;
                                int nrows = 1;

                                // use custom matrix x vector kernel
                                switch (src0t) {
                                    case GGML_TYPE_F32:
                                        {
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_f32_f32];
                                            nrows = 4;
                                        } break;
                                    case GGML_TYPE_F16:
                                        {
                                            nth0 = 32;
                                            nth1 = 1;
                                            if (ne11 * ne12 < 4) {
                                                [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_1row];
                                            } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
                                                [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_l4];
                                                nrows = ne11;
                                            } else {
                                                [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
                                                nrows = 4;
                                            }
                                        } break;
                                    case GGML_TYPE_Q4_0:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 8;
                                            nth1 = 8;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_0_f32];
                                        } break;
                                    case GGML_TYPE_Q4_1:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 8;
                                            nth1 = 8;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
                                        } break;
                                    case GGML_TYPE_Q8_0:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 8;
                                            nth1 = 8;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q8_0_f32];
                                        } break;
                                    case GGML_TYPE_Q2_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_K_f32];
                                        } break;
                                    case GGML_TYPE_Q3_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_K_f32];
                                        } break;
                                    case GGML_TYPE_Q4_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 4; //1;
                                            nth1 = 8; //32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_K_f32];
                                        } break;
                                    case GGML_TYPE_Q5_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_K_f32];
                                        } break;
                                    case GGML_TYPE_Q6_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q6_K_f32];
                                        } break;
                                    default:
                                        {
                                            metal_printf("Asserting on type %d\n", (int)src0t);
                                            GGML_ASSERT(false && "not implemented");
                                        }
                                }

                                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
                                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:6];
                                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:7];
                                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:8];
                                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:9];
                                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:10];
                                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:11];
                                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:12];
                                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:13];
                                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:14];
                                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:15];
                                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:16];
                                [encoder setBytes:&gqa  length:sizeof(gqa)  atIndex:17];

                                if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 ||
                                    src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                                else if (src0t == GGML_TYPE_Q4_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                                else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                                }
                                else if (src0t == GGML_TYPE_Q5_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                                else if (src0t == GGML_TYPE_Q6_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1)/2, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                } else {
                                    int64_t ny = (ne11 + nrows - 1)/nrows;
                                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ny, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                            }
                        } break;
                    case GGML_OP_GET_ROWS:
                        {
                            switch (src0->type) {
                                case GGML_TYPE_F32:  [encoder setComputePipelineState:ctx->pipeline_get_rows_f32];  break;
                                case GGML_TYPE_F16:  [encoder setComputePipelineState:ctx->pipeline_get_rows_f16];  break;
                                case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
                                case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
                                case GGML_TYPE_Q8_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q8_0]; break;
                                case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break;
                                case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break;
                                case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break;
                                case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break;
                                case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break;
                                default: GGML_ASSERT(false && "not implemented");
                            }

                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:4];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:5];

                            const int64_t n = ggml_nelements(src1);

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_RMS_NORM:
                        {
                            float eps;
                            memcpy(&eps, dst->op_params, sizeof(float));

                            const int nth = 512;

                            [encoder setComputePipelineState:ctx->pipeline_rms_norm];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                            [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
                            [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0];

                            const int64_t nrows = ggml_nrows(src0);

                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_NORM:
                        {
                            float eps;
                            memcpy(&eps, dst->op_params, sizeof(float));

                            const int nth = 256;

                            [encoder setComputePipelineState:ctx->pipeline_norm];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                            [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];

                            const int64_t nrows = ggml_nrows(src0);

                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_ALIBI:
                        {
                            GGML_ASSERT((src0t == GGML_TYPE_F32));

                            const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past);
                            const int n_head = ((int32_t *) dst->op_params)[1];
                            float max_bias;
                            memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

                            if (__builtin_popcount(n_head) != 1) {
                                GGML_ASSERT(false && "only power-of-two n_head implemented");
                            }

                            const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                            const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);

                            [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];
                            [encoder setBytes:&m0   length:sizeof(   float) atIndex:18];

                            const int nth = 32;

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_ROPE:
                        {
                            const int n_past = ((int32_t *) dst->op_params)[0];
                            const int n_dims = ((int32_t *) dst->op_params)[1];
                            const int mode   = ((int32_t *) dst->op_params)[2];

                            float freq_base;
                            float freq_scale;
                            memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
                            memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

                            [encoder setComputePipelineState:ctx->pipeline_rope];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];
                            [encoder setBytes:&n_past     length:sizeof(  int) atIndex:18];
                            [encoder setBytes:&n_dims     length:sizeof(  int) atIndex:19];
                            [encoder setBytes:&mode       length:sizeof(  int) atIndex:20];
                            [encoder setBytes:&freq_base  length:sizeof(float) atIndex:21];
                            [encoder setBytes:&freq_scale length:sizeof(float) atIndex:22];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(32, 1, 1)];
                        } break;
                    case GGML_OP_DUP:
                    case GGML_OP_CPY:
                    case GGML_OP_CONT:
                        {
                            const int nth = 32;

                            switch (src0t) {
                                case GGML_TYPE_F32:
                                    {
                                        switch (dstt) {
                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f16]; break;
                                            case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32]; break;
                                            default: GGML_ASSERT(false && "not implemented");
                                        }
                                    } break;
                                case GGML_TYPE_F16:
                                    {
                                        switch (dstt) {
                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break;
                                            case GGML_TYPE_F32: GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break;
                                            default: GGML_ASSERT(false && "not implemented");
                                        }
                                    } break;
                                default: GGML_ASSERT(false && "not implemented");
                            }

                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    default:
                        {
                            metal_printf("%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                            GGML_ASSERT(false);
                        }
                }
            }

            if (encoder != nil) {
                [encoder endEncoding];
                encoder = nil;
            }

            [command_buffer commit];
        });
    }

    // wait for all threads to finish
    dispatch_barrier_sync(ctx->d_queue, ^{});

    // check the status of the command buffers
    // needed to detect if the device ran out of memory, for example (#1881)
    for (int i = 0; i < n_cb; i++) {
        [ctx->command_buffers[i] waitUntilCompleted];

        MTLCommandBufferStatus status = (MTLCommandBufferStatus) [ctx->command_buffers[i] status];
        if (status != MTLCommandBufferStatusCompleted) {
            metal_printf("%s: command buffer %d failed with status %lu\n", __func__, i, status);
            GGML_ASSERT(false);
        }
    }

    }
}
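
// a minimal end-to-end sketch of the API in this file (hypothetical buffer names and sizes;
// error handling omitted):
//
//   struct ggml_metal_context * ctx_metal = ggml_metal_init(1);
//
//   ggml_metal_add_buffer(ctx_metal, "weights", weights_data, weights_size, max_tensor_size);
//   ggml_metal_add_buffer(ctx_metal, "eval",    eval_data,    eval_size,    0);
//
//   ggml_metal_graph_find_concurrency(ctx_metal, gf, true); // optional concurrency pass
//   ggml_metal_graph_compute(ctx_metal, gf);
//   ggml_metal_get_tensor(ctx_metal, result); // copy the result back to host memory
//
//   ggml_metal_free(ctx_metal);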