// ggml-metal.m

#import "ggml-metal.h"

#import "ggml.h"

#import <Foundation/Foundation.h>

#import <Metal/Metal.h>
#import <MetalPerformanceShaders/MetalPerformanceShaders.h>

#ifdef GGML_METAL_NDEBUG
#define metal_printf(...)
#else
#define metal_printf(...) fprintf(stderr, __VA_ARGS__)
#endif

#define UNUSED(x) (void)(x)

struct ggml_metal_buffer {
    const char * name;

    void   * data;
    size_t   size;

    id<MTLBuffer> metal;
};

struct ggml_metal_context {
    int n_cb;

    float * logits;

    id<MTLDevice>       device;
    id<MTLCommandQueue> queue;
    id<MTLLibrary>      library;

    int n_buffers;
    struct ggml_metal_buffer buffers[GGML_METAL_MAX_BUFFERS];

    int concur_list[GGML_MAX_NODES];
    int concur_list_len;

    // custom kernels
#define GGML_METAL_DECL_KERNEL(name) \
    id<MTLFunction>             function_##name; \
    id<MTLComputePipelineState> pipeline_##name

    GGML_METAL_DECL_KERNEL(add);
    GGML_METAL_DECL_KERNEL(add_row); // TODO: avoid this extra kernel, instead extend the "add" kernel to support broadcast
    GGML_METAL_DECL_KERNEL(mul);
    GGML_METAL_DECL_KERNEL(mul_row); // TODO: avoid this extra kernel, instead extend the "mul" kernel to support broadcast
    GGML_METAL_DECL_KERNEL(scale);
    GGML_METAL_DECL_KERNEL(silu);
    GGML_METAL_DECL_KERNEL(relu);
    GGML_METAL_DECL_KERNEL(gelu);
    GGML_METAL_DECL_KERNEL(soft_max);
    GGML_METAL_DECL_KERNEL(diag_mask_inf);
    GGML_METAL_DECL_KERNEL(get_rows_f16);
    GGML_METAL_DECL_KERNEL(get_rows_q4_0);
    GGML_METAL_DECL_KERNEL(get_rows_q4_1);
    GGML_METAL_DECL_KERNEL(get_rows_q2_K);
    GGML_METAL_DECL_KERNEL(get_rows_q3_K);
    GGML_METAL_DECL_KERNEL(get_rows_q4_K);
    GGML_METAL_DECL_KERNEL(get_rows_q5_K);
    GGML_METAL_DECL_KERNEL(get_rows_q6_K);
    GGML_METAL_DECL_KERNEL(rms_norm);
    GGML_METAL_DECL_KERNEL(norm);
    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q2_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q3_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32);
    GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32);
    GGML_METAL_DECL_KERNEL(rope);
    GGML_METAL_DECL_KERNEL(alibi_f32);
    GGML_METAL_DECL_KERNEL(cpy_f32_f16);
    GGML_METAL_DECL_KERNEL(cpy_f32_f32);
    GGML_METAL_DECL_KERNEL(cpy_f16_f16);

#undef GGML_METAL_DECL_KERNEL
};
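
// For reference, each GGML_METAL_DECL_KERNEL(name) above expands to a pair of members,
// e.g. GGML_METAL_DECL_KERNEL(add) becomes:
//
//     id<MTLFunction>             function_add;
//     id<MTLComputePipelineState> pipeline_add;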
// MSL code
// TODO: move the contents here when ready
//       for now it is easier to work in a separate file
static NSString * const msl_library_source = @"see metal.metal";

// Here to assist with NSBundle Path Hack
@interface GGMLMetalClass : NSObject
@end
@implementation GGMLMetalClass
@end

struct ggml_metal_context * ggml_metal_init(int n_cb) {
    fprintf(stderr, "%s: allocating\n", __func__);

    struct ggml_metal_context * ctx = malloc(sizeof(struct ggml_metal_context));

    ctx->n_cb   = n_cb;
    ctx->device = MTLCreateSystemDefaultDevice();
    ctx->queue  = [ctx->device newCommandQueue];
    ctx->n_buffers = 0;
    ctx->concur_list_len = 0;

    // determine if we can use MPS
    if (MPSSupportsMTLDevice(ctx->device)) {
        fprintf(stderr, "%s: using MPS\n", __func__);
    } else {
        fprintf(stderr, "%s: not using MPS\n", __func__);
        GGML_ASSERT(false && "MPS not supported");
    }

#if 0
    // compile from source string and show compile log
    {
        NSError * error = nil;

        ctx->library = [ctx->device newLibraryWithSource:msl_library_source options:nil error:&error];
        if (error) {
            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
            exit(1);
        }
    }
#else
    UNUSED(msl_library_source);

    // read the source from "ggml-metal.metal" into a string and use newLibraryWithSource
    {
        NSError * error = nil;

        //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
        NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
        fprintf(stderr, "%s: loading '%s'\n", __func__, [path UTF8String]);

        NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
        if (error) {
            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
            exit(1);
        }

#ifdef GGML_QKK_64
        MTLCompileOptions * options = [MTLCompileOptions new];
        options.preprocessorMacros = @{ @"QK_K" : @(64) };
        ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
#else
        ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
#endif
        if (error) {
            fprintf(stderr, "%s: error: %s\n", __func__, [[error description] UTF8String]);
            exit(1);
        }
    }
#endif

    // load kernels
    {
#define GGML_METAL_ADD_KERNEL(name) \
        ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
        ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:nil]; \
        fprintf(stderr, "%s: loaded %-32s %16p\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name);

        GGML_METAL_ADD_KERNEL(add);
        GGML_METAL_ADD_KERNEL(add_row);
        GGML_METAL_ADD_KERNEL(mul);
        GGML_METAL_ADD_KERNEL(mul_row);
        GGML_METAL_ADD_KERNEL(scale);
        GGML_METAL_ADD_KERNEL(silu);
        GGML_METAL_ADD_KERNEL(relu);
        GGML_METAL_ADD_KERNEL(gelu);
        GGML_METAL_ADD_KERNEL(soft_max);
        GGML_METAL_ADD_KERNEL(diag_mask_inf);
        GGML_METAL_ADD_KERNEL(get_rows_f16);
        GGML_METAL_ADD_KERNEL(get_rows_q4_0);
        GGML_METAL_ADD_KERNEL(get_rows_q4_1);
        GGML_METAL_ADD_KERNEL(get_rows_q2_K);
        GGML_METAL_ADD_KERNEL(get_rows_q3_K);
        GGML_METAL_ADD_KERNEL(get_rows_q4_K);
        GGML_METAL_ADD_KERNEL(get_rows_q5_K);
        GGML_METAL_ADD_KERNEL(get_rows_q6_K);
        GGML_METAL_ADD_KERNEL(rms_norm);
        GGML_METAL_ADD_KERNEL(norm);
        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q2_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q3_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32);
        GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
        GGML_METAL_ADD_KERNEL(rope);
        GGML_METAL_ADD_KERNEL(alibi_f32);
        GGML_METAL_ADD_KERNEL(cpy_f32_f16);
        GGML_METAL_ADD_KERNEL(cpy_f32_f32);
        GGML_METAL_ADD_KERNEL(cpy_f16_f16);

#undef GGML_METAL_ADD_KERNEL
    }

    fprintf(stderr, "%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
    fprintf(stderr, "%s: hasUnifiedMemory             = %s\n",       __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
    if (ctx->device.maxTransferRate != 0) {
        fprintf(stderr, "%s: maxTransferRate              = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
    } else {
        fprintf(stderr, "%s: maxTransferRate              = built-in GPU\n", __func__);
    }

    return ctx;
}

void ggml_metal_free(struct ggml_metal_context * ctx) {
    fprintf(stderr, "%s: deallocating\n", __func__);
    for (int i = 0; i < ctx->n_buffers; ++i) {
        [ctx->buffers[i].metal release];
    }
    free(ctx);
}

void ggml_metal_set_n_cb(struct ggml_metal_context * ctx, int n_cb) {
    ctx->n_cb = n_cb;
}

bool ggml_metal_if_optimized(struct ggml_metal_context * ctx) {
    if (ctx->concur_list_len) {
        return true;
    }
    return false;
}
// finds the Metal buffer that contains the tensor data on the GPU device
// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
// Metal buffer based on the host memory pointer
//
static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, struct ggml_tensor * t, size_t * offs) {
    //fprintf(stderr, "%s: data tensor '%16s', offs_data = %8ld, offs_eval = %8ld, offs_cach = %8ld\n", __func__, t->name, offs_data, offs_eval, offs_cach);

    const int64_t tsize = ggml_nbytes(t);

    // find the view that contains the tensor fully
    for (int i = 0; i < ctx->n_buffers; ++i) {
        const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;

        if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
            *offs = (size_t) ioffs;

            //fprintf(stderr, "%s: '%s' tensor '%16s', offs = %8ld\n", __func__, ctx->buffers[i].name, t->name, *offs);

            return ctx->buffers[i].metal;
        }
    }

    fprintf(stderr, "%s: error: buffer is nil\n", __func__);

    return nil;
}
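
// Example (illustrative numbers): if buffers[0] was registered with data = 0x10000 and
// size = 0x8000, then a tensor with t->data = 0x10400 and ggml_nbytes(t) = 0x200 gives
// ioffs = 0x400; since 0x400 + 0x200 <= 0x8000 the tensor fits entirely in that view,
// so buffers[0].metal is returned with *offs = 0x400.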
bool ggml_metal_add_buffer(
        struct ggml_metal_context * ctx,
                     const char * name,
                           void * data,
                         size_t   size,
                         size_t   max_size) {
    if (ctx->n_buffers >= GGML_METAL_MAX_BUFFERS) {
        fprintf(stderr, "%s: too many buffers\n", __func__);
        return false;
    }

    if (data) {
        // verify that the buffer does not overlap with any of the existing buffers
        for (int i = 0; i < ctx->n_buffers; ++i) {
            const int64_t ioffs = (int64_t) data - (int64_t) ctx->buffers[i].data;

            if (ioffs >= 0 && ioffs < (int64_t) ctx->buffers[i].size) {
                fprintf(stderr, "%s: error: buffer '%s' overlaps with '%s'\n", __func__, name, ctx->buffers[i].name);
                return false;
            }
        }

        const size_t size_page = getpagesize();

        size_t size_aligned = size;
        if ((size_aligned % size_page) != 0) {
            size_aligned += (size_page - (size_aligned % size_page));
        }

        // the buffer fits into the max buffer size allowed by the device
        if (size_aligned <= ctx->device.maxBufferLength) {
            ctx->buffers[ctx->n_buffers].name = name;
            ctx->buffers[ctx->n_buffers].data = data;
            ctx->buffers[ctx->n_buffers].size = size;

            ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];

            if (ctx->buffers[ctx->n_buffers].metal == nil) {
                fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_aligned / 1024.0 / 1024.0);
                return false;
            }

            fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB", __func__, name, size_aligned / 1024.0 / 1024.0);

            ++ctx->n_buffers;
        } else {
            // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
            // one of the views
            const size_t size_ovlp = ((max_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
            const size_t size_step = ctx->device.maxBufferLength - size_ovlp;
            const size_t size_view = ctx->device.maxBufferLength;
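
            // Example (illustrative): size_ovlp rounds max_size up to the next page boundary and
            // adds one extra page; views of size_view = maxBufferLength then start every
            // size_step = maxBufferLength - size_ovlp bytes, so consecutive views overlap by
            // size_ovlp bytes and any tensor of at most max_size bytes lies entirely inside
            // at least one view.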
            for (size_t i = 0; i < size; i += size_step) {
                const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);

                ctx->buffers[ctx->n_buffers].name = name;
                ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
                ctx->buffers[ctx->n_buffers].size = size_step_aligned;

                ctx->buffers[ctx->n_buffers].metal = [ctx->device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];

                if (ctx->buffers[ctx->n_buffers].metal == nil) {
                    fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, size_step_aligned / 1024.0 / 1024.0);
                    return false;
                }

                fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB, offs = %12ld", __func__, name, size_step_aligned / 1024.0 / 1024.0, i);
                if (i + size_step < size) {
                    fprintf(stderr, "\n");
                }

                ++ctx->n_buffers;
            }
        }

        fprintf(stderr, ", (%8.2f / %8.2f)",
                ctx->device.currentAllocatedSize / 1024.0 / 1024.0,
                ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);

        if (ctx->device.currentAllocatedSize > ctx->device.recommendedMaxWorkingSetSize) {
            fprintf(stderr, ", warning: current allocated size is greater than the recommended max working set size\n");
        } else {
            fprintf(stderr, "\n");
        }
    }

    return true;
}

void ggml_metal_set_tensor(
        struct ggml_metal_context * ctx,
        struct ggml_tensor * t) {
    metal_printf("%s: set input for tensor '%s'\n", __func__, t->name);

    size_t offs;
    id<MTLBuffer> id_dst = ggml_metal_get_buffer(ctx, t, &offs);

    memcpy((void *) ((uint8_t *) id_dst.contents + offs), t->data, ggml_nbytes(t));
}

void ggml_metal_get_tensor(
        struct ggml_metal_context * ctx,
        struct ggml_tensor * t) {
    metal_printf("%s: extract results for tensor '%s'\n", __func__, t->name);

    size_t offs;
    id<MTLBuffer> id_src = ggml_metal_get_buffer(ctx, t, &offs);

    memcpy(t->data, (void *) ((uint8_t *) id_src.contents + offs), ggml_nbytes(t));
}
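
// Example (illustrative sketch, not part of the upstream file): typical host-side usage of
// this API, assuming `gf` is a built ggml graph, `ctx_data` is the ggml context that owns
// the tensor data, and `input`/`output` are tensors in that context (error handling omitted):
//
//     struct ggml_metal_context * ctx_mtl = ggml_metal_init(1);
//
//     // register the host buffer once; max_size is the largest tensor size and only
//     // matters when the buffer must be split into overlapping views
//     ggml_metal_add_buffer(ctx_mtl, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data), 0);
//
//     ggml_metal_set_tensor   (ctx_mtl, input);  // upload input
//     ggml_metal_graph_compute(ctx_mtl, gf);     // run the graph on the GPU
//     ggml_metal_get_tensor   (ctx_mtl, output); // download result
//
//     ggml_metal_free(ctx_mtl);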
void ggml_metal_graph_find_concurrency(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf) {
    int search_depth = gf->n_nodes; // we only find concurrency in this range to avoid wasting too much time
    int nodes_unused[GGML_MAX_NODES];

    for (int i = 0; i < GGML_MAX_NODES; i++) { ctx->concur_list[i] = 0; }
    for (int i = 0; i < gf->n_nodes;   i++) { nodes_unused[i]     = 1; }
    ctx->concur_list_len = 0;

    int n_left    = gf->n_nodes;
    int n_start   = 0; // all nodes before n_start in the nodes_unused array have already been scheduled into ctx->concur_list
    int level_pos = 0; // in ctx->concur_list, the last layer (level) ends at level_pos

    while (n_left > 0) {
        // number of nodes at a layer (that can be issued concurrently)
        int concurrency = 0;
        for (int i = n_start; i < ((n_start + search_depth > gf->n_nodes) ? gf->n_nodes : n_start + search_depth); i++) {
            if (nodes_unused[i]) {
                // check if the requirements for gf->nodes[i] are satisfied
                int exe_flag = 1;

                // scan all srcs
                for (int src_ind = 0; src_ind < GGML_MAX_SRC; src_ind++) {
                    struct ggml_tensor * src_cur = gf->nodes[i]->src[src_ind];
                    if (src_cur) {
                        // a leaf node is always satisfied
                        if (src_cur->op == GGML_OP_NONE && src_cur->grad == NULL) { continue; }

                        // otherwise this src must be the output of a previously scheduled node
                        int is_found = 0;

                        // scan 2*search_depth entries back because we insert barriers
                        for (int j = ((level_pos - 2*search_depth) < 0 ? 0 : (level_pos - 2*search_depth)); j < level_pos; j++) {
                            if (gf->nodes[ctx->concur_list[j]] == src_cur) { is_found = 1; break; }
                        }
                        if (is_found == 0) { exe_flag = 0; break; }
                    }
                }
                if (exe_flag) {
                    // check if nodes[i]'s data will be overwritten by a node before nodes[i];
                    // if node[5] and node[3] write to the same memory region, then we can't issue node[5] before node[3]
                    int64_t data_start = (int64_t) gf->nodes[i]->data;
                    int64_t length     = (int64_t) ggml_nbytes(gf->nodes[i]);
                    for (int j = n_start; j < i; j++) {
                        if (nodes_unused[j] && gf->nodes[j]->op != GGML_OP_RESHAPE
                                            && gf->nodes[j]->op != GGML_OP_VIEW
                                            && gf->nodes[j]->op != GGML_OP_TRANSPOSE
                                            && gf->nodes[j]->op != GGML_OP_PERMUTE) {
                            if (((int64_t)gf->nodes[j]->data) >= data_start + length ||
                                ((int64_t)gf->nodes[j]->data) + (int64_t) ggml_nbytes(gf->nodes[j]) <= data_start) {
                                continue;
                            } else {
                                exe_flag = 0;
                            }
                        }
                    }
                }
                if (exe_flag) {
                    ctx->concur_list[level_pos + concurrency] = i;
                    nodes_unused[i] = 0;
                    concurrency++;
                    ctx->concur_list_len++;
                }
            }
        }
        n_left -= concurrency;
        // add a barrier between the layers
        ctx->concur_list[level_pos + concurrency] = -1;
        ctx->concur_list_len++;
        // skip the leading nodes that have already been scheduled
        while (!nodes_unused[n_start]) { n_start++; }
        level_pos += concurrency + 1;
    }

    if (ctx->concur_list_len > GGML_MAX_NODES) {
        fprintf(stderr, "%s: too many elements for metal ctx->concur_list!\n", __func__);
    }
}
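
// Example (illustrative): for a 4-node graph where nodes 0 and 1 only read leaf tensors,
// node 2 reads the outputs of 0 and 1, and node 3 reads the output of 2, the schedule is
//
//     concur_list = [0, 1, -1, 2, -1, 3, -1]   (concur_list_len = 7)
//
// i.e. nodes 0 and 1 form one concurrent layer, and each -1 becomes a memory barrier
// when the graph is encoded in ggml_metal_graph_compute below.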
void ggml_metal_graph_compute(
        struct ggml_metal_context * ctx,
        struct ggml_cgraph * gf) {
    metal_printf("%s: evaluating graph\n", __func__);

    // if there is ctx->concur_list, dispatch concurrently
    // else fallback to serial dispatch
    MTLComputePassDescriptor * edesc = MTLComputePassDescriptor.computePassDescriptor;

    const bool has_concur = ctx->concur_list_len && ctx->concur_list_len <= GGML_MAX_NODES;

    const int n_nodes  = has_concur ? ctx->concur_list_len : gf->n_nodes;
    edesc.dispatchType = has_concur ? MTLDispatchTypeConcurrent : MTLDispatchTypeSerial;

    // create multiple command buffers and enqueue them
    // then, we encode the graph into the command buffers in parallel
    const int n_cb = ctx->n_cb;

    NSMutableArray * command_buffers = [NSMutableArray arrayWithCapacity:n_cb];

    for (int i = 0; i < n_cb; ++i) {
        command_buffers[i] = [ctx->queue commandBuffer];

        // enqueue the command buffers in order to specify their execution order
        [command_buffers[i] enqueue];
    }

    // TODO: is this the best way to start threads?
    dispatch_queue_t queue = dispatch_queue_create("llama.cpp", DISPATCH_QUEUE_CONCURRENT);

    for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
        const int n_nodes_per_cb = (n_nodes + n_cb - 1) / n_cb;

        dispatch_async(queue, ^{
            size_t offs_src0 = 0;
            size_t offs_src1 = 0;
            size_t offs_dst  = 0;

            id<MTLCommandBuffer> command_buffer = command_buffers[cb_idx];

            id<MTLComputeCommandEncoder> encoder = nil;

            const int node_start =                                  (cb_idx + 0) * n_nodes_per_cb;
            const int node_end   = (cb_idx == n_cb - 1) ? n_nodes : (cb_idx + 1) * n_nodes_per_cb;
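
            // Example (illustrative): with n_nodes = 10 and n_cb = 3, n_nodes_per_cb = 4 and
            // the command buffers encode node ranges [0, 4), [4, 8) and [8, 10); the last
            // command buffer always absorbs the remainder.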
            for (int ind = node_start; ind < node_end; ++ind) {
                const int i = has_concur ? ctx->concur_list[ind] : ind;

                if (i == -1) {
                    if (encoder == nil) {
                        encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                        continue;
                    }
                    [encoder memoryBarrierWithScope:MTLBarrierScopeBuffers];
                    continue;
                }

                metal_printf("%s: encoding node %3d, op = %8s\n", __func__, i, ggml_op_name(gf->nodes[i]->op));

                struct ggml_tensor * src0 = gf->nodes[i]->src[0];
                struct ggml_tensor * src1 = gf->nodes[i]->src[1];
                struct ggml_tensor * dst  = gf->nodes[i];

                const int64_t  ne00 = src0 ? src0->ne[0] : 0;
                const int64_t  ne01 = src0 ? src0->ne[1] : 0;
                const int64_t  ne02 = src0 ? src0->ne[2] : 0;
                const int64_t  ne03 = src0 ? src0->ne[3] : 0;

                const uint64_t nb00 = src0 ? src0->nb[0] : 0;
                const uint64_t nb01 = src0 ? src0->nb[1] : 0;
                const uint64_t nb02 = src0 ? src0->nb[2] : 0;
                const uint64_t nb03 = src0 ? src0->nb[3] : 0;

                const int64_t  ne10 = src1 ? src1->ne[0] : 0;
                const int64_t  ne11 = src1 ? src1->ne[1] : 0;
                const int64_t  ne12 = src1 ? src1->ne[2] : 0;
                const int64_t  ne13 = src1 ? src1->ne[3] : 0; UNUSED(ne13);

                const uint64_t nb10 = src1 ? src1->nb[0] : 0;
                const uint64_t nb11 = src1 ? src1->nb[1] : 0;
                const uint64_t nb12 = src1 ? src1->nb[2] : 0;
                const uint64_t nb13 = src1 ? src1->nb[3] : 0; UNUSED(nb13);

                const int64_t  ne0  = dst ? dst->ne[0] : 0;
                const int64_t  ne1  = dst ? dst->ne[1] : 0;
                const int64_t  ne2  = dst ? dst->ne[2] : 0;
                const int64_t  ne3  = dst ? dst->ne[3] : 0;

                const uint64_t nb0  = dst ? dst->nb[0] : 0;
                const uint64_t nb1  = dst ? dst->nb[1] : 0;
                const uint64_t nb2  = dst ? dst->nb[2] : 0;
                const uint64_t nb3  = dst ? dst->nb[3] : 0;

                const enum ggml_type src0t = src0 ? src0->type : GGML_TYPE_COUNT;
                const enum ggml_type src1t = src1 ? src1->type : GGML_TYPE_COUNT;
                const enum ggml_type dstt  = dst  ? dst->type  : GGML_TYPE_COUNT;

                id<MTLBuffer> id_src0 = src0 ? ggml_metal_get_buffer(ctx, src0, &offs_src0) : nil;
                id<MTLBuffer> id_src1 = src1 ? ggml_metal_get_buffer(ctx, src1, &offs_src1) : nil;
                id<MTLBuffer> id_dst  = dst  ? ggml_metal_get_buffer(ctx, dst,  &offs_dst)  : nil;

                //metal_printf("%s: op - %s\n", __func__, ggml_op_name(dst->op));
                //if (src0) {
                //    metal_printf("%s: src0 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src0t), ne00, ne01, ne02,
                //            ggml_is_contiguous(src0), src0->name);
                //}
                //if (src1) {
                //    metal_printf("%s: src1 - %4s [%5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(src1t), ne10, ne11, ne12,
                //            ggml_is_contiguous(src1), src1->name);
                //}
                //if (dst) {
                //    metal_printf("%s: dst  - %4s [%5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(dstt), ne0, ne1, ne2,
                //            dst->name);
                //}

                switch (dst->op) {
                    case GGML_OP_NONE:
                    case GGML_OP_RESHAPE:
                    case GGML_OP_VIEW:
                    case GGML_OP_TRANSPOSE:
                    case GGML_OP_PERMUTE:
                        {
                            // noop
                        } break;
                    case GGML_OP_ADD:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            if (ggml_nelements(src1) == ne10) {
                                // src1 is a row
                                [encoder setComputePipelineState:ctx->pipeline_add_row];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_add];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];

                            const int64_t n = ggml_nelements(dst);

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
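                    // Example (illustrative): for src0 with ne = [4096, 32, 1, 1] and src1 with
                    // ne = [4096, 1, 1, 1], ggml_nelements(src1) == ne10 == 4096 holds, so the
                    // broadcasting *_row kernel is selected and the single src1 row is applied
                    // to all 32 rows of src0; the same test drives kernel selection for
                    // GGML_OP_MUL below.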
                    case GGML_OP_MUL:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            if (ggml_nelements(src1) == ne10) {
                                // src1 is a row
                                [encoder setComputePipelineState:ctx->pipeline_mul_row];
                            } else {
                                [encoder setComputePipelineState:ctx->pipeline_mul];
                            }
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];

                            const int64_t n = ggml_nelements(dst);

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_SCALE:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const float scale = *(const float *) src1->data;

                            [encoder setComputePipelineState:ctx->pipeline_scale];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&scale length:sizeof(scale) atIndex:2];

                            const int64_t n = ggml_nelements(dst);

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_UNARY:
                        switch (ggml_get_unary_op(gf->nodes[i])) {
                            case GGML_UNARY_OP_SILU:
                                {
                                    if (encoder == nil) {
                                        encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                                    }

                                    [encoder setComputePipelineState:ctx->pipeline_silu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst);

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            case GGML_UNARY_OP_RELU:
                                {
                                    if (encoder == nil) {
                                        encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                                    }

                                    [encoder setComputePipelineState:ctx->pipeline_relu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst);

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            case GGML_UNARY_OP_GELU:
                                {
                                    if (encoder == nil) {
                                        encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                                    }

                                    [encoder setComputePipelineState:ctx->pipeline_gelu];
                                    [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                    [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];

                                    const int64_t n = ggml_nelements(dst);

                                    [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                                } break;
                            default:
                                {
                                    fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                                    GGML_ASSERT(false);
                                }
                        } break;
                    case GGML_OP_SOFT_MAX:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const int nth = 32;

                            [encoder setComputePipelineState:ctx->pipeline_soft_max];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:4];
                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_DIAG_MASK_INF:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const int n_past = ((int32_t *)(dst->op_params))[0];

                            [encoder setComputePipelineState:ctx->pipeline_diag_mask_inf];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00   length:sizeof(ne00) atIndex:2];
                            [encoder setBytes:&ne01   length:sizeof(ne01) atIndex:3];
                            [encoder setBytes:&n_past length:sizeof(int)  atIndex:4];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne00, ne01, ne02) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_MUL_MAT:
                        {
                            // TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224

                            GGML_ASSERT(ne00 == ne10);
                            GGML_ASSERT(ne02 == ne12);

                            if (ggml_is_contiguous(src0) &&
                                ggml_is_contiguous(src1) &&
                                (src0t == GGML_TYPE_F32 || src0t == GGML_TYPE_F16) && ne11 > 1) {

                                if (encoder != nil) {
                                    [encoder endEncoding];
                                    encoder = nil;
                                }

                                MPSDataType src0dt = src0t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;
                                MPSDataType src1dt = src1t == GGML_TYPE_F32 ? MPSDataTypeFloat32 : MPSDataTypeFloat16;

                                // for F32 x F32 we use MPS
                                MPSMatrixDescriptor * desc0 = [MPSMatrixDescriptor
                                    matrixDescriptorWithRows:ne01 columns:ne00 rowBytes:src0->nb[1] dataType:src0dt];

                                MPSMatrixDescriptor * desc1 = [MPSMatrixDescriptor
                                    matrixDescriptorWithRows:ne11 columns:ne10 rowBytes:src1->nb[1] dataType:src1dt];

                                MPSMatrixDescriptor * desc  = [MPSMatrixDescriptor
                                    matrixDescriptorWithRows:ne1 columns:ne0 rowBytes:dst->nb[1] dataType:MPSDataTypeFloat32];

                                MPSMatrixMultiplication * mul = [[MPSMatrixMultiplication alloc]
                                    initWithDevice:ctx->device transposeLeft:false transposeRight:true
                                        resultRows:ne11 resultColumns:ne01 interiorColumns:ne00 alpha:1.0 beta:0.0];
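
                                // Note: ggml stores matrices row-major with ne[0] as the row length,
                                // so src0 is (ne01 x ne00) and src1 is (ne11 x ne10); with
                                // transposeRight:true MPS computes dst = src1 * src0^T, an
                                // (ne11 x ne01) result, which is why src1 is passed as leftMatrix
                                // and src0 as rightMatrix below.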
                                // we need to do ne02 multiplications
                                // TODO: is there a way to do this in parallel - currently very slow ..
                                // TODO: might be possible to offload part of the computation to ANE using Accelerate's CBLAS
                                for (int64_t i02 = 0; i02 < ne02; ++i02) {
                                    size_t offs_src0_cur = offs_src0 + i02*nb02;
                                    size_t offs_src1_cur = offs_src1 + i02*nb12;
                                    size_t offs_dst_cur  = offs_dst  + i02*nb2;

                                    MPSMatrix * mat_src0 = [[MPSMatrix alloc] initWithBuffer:id_src0 offset:offs_src0_cur descriptor:desc0];
                                    MPSMatrix * mat_src1 = [[MPSMatrix alloc] initWithBuffer:id_src1 offset:offs_src1_cur descriptor:desc1];
                                    MPSMatrix * mat_dst  = [[MPSMatrix alloc] initWithBuffer:id_dst  offset:offs_dst_cur  descriptor:desc ];

                                    [mul encodeToCommandBuffer:command_buffer leftMatrix:mat_src1 rightMatrix:mat_src0 resultMatrix:mat_dst];
                                }
                            } else {
                                if (encoder == nil) {
                                    encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                                }

                                int nth0 = 32;
                                int nth1 = 1;

                                // use custom matrix x vector kernel
                                switch (src0t) {
                                    case GGML_TYPE_F16:
                                        {
                                            GGML_ASSERT(ne02 == ne12);

                                            nth0 = 64;
                                            nth1 = 1;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
                                        } break;
                                    case GGML_TYPE_Q4_0:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 8;
                                            nth1 = 8;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_0_f32];
                                        } break;
                                    case GGML_TYPE_Q4_1:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 8;
                                            nth1 = 8;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
                                        } break;
                                    case GGML_TYPE_Q2_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_K_f32];
                                        } break;
                                    case GGML_TYPE_Q3_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_K_f32];
                                        } break;
                                    case GGML_TYPE_Q4_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_K_f32];
                                        } break;
                                    case GGML_TYPE_Q5_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_K_f32];
                                        } break;
                                    case GGML_TYPE_Q6_K:
                                        {
                                            GGML_ASSERT(ne02 == 1);
                                            GGML_ASSERT(ne12 == 1);

                                            nth0 = 2;
                                            nth1 = 32;
                                            [encoder setComputePipelineState:ctx->pipeline_mul_mat_q6_K_f32];
                                        } break;
                                    default:
                                        {
                                            fprintf(stderr, "Asserting on type %d\n", (int)src0t);
                                            GGML_ASSERT(false && "not implemented");
                                        }
                                };

                                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                                [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
                                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
                                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:5];
                                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:6];
                                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:7];
                                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:8];
                                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:9];
                                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:10];
                                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:11];
                                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:12];
                                [encoder setBytes:&ne0  length:sizeof(ne0)  atIndex:13];
                                [encoder setBytes:&ne1  length:sizeof(ne1)  atIndex:14];
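
                                // Example (illustrative): Q4_0 with ne01 = 4096 and ne11 = 1
                                // dispatches (4096 + 7) / 8 = 512 threadgroups of 8x8 threads,
                                // i.e. each threadgroup covers 8 rows of src0; the per-type
                                // divisors below match how many rows a threadgroup of the
                                // corresponding kernel processes.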
                                if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 ||
                                    src0t == GGML_TYPE_Q2_K || src0t == GGML_TYPE_Q4_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7) / 8, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                                else if (src0t == GGML_TYPE_Q3_K) {
#ifdef GGML_QKK_64
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1) / 2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#else
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
#endif
                                }
                                else if (src0t == GGML_TYPE_Q5_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3) / 4, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                                else if (src0t == GGML_TYPE_Q6_K) {
                                    [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 1) / 2, ne11, 1) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                } else {
                                    [encoder setThreadgroupMemoryLength:nth0*sizeof(float) atIndex:0];
                                    [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
                                }
                            }
                        } break;
                    case GGML_OP_GET_ROWS:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            switch (src0->type) {
                                case GGML_TYPE_F16:  [encoder setComputePipelineState:ctx->pipeline_get_rows_f16];  break;
                                case GGML_TYPE_Q4_0: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_0]; break;
                                case GGML_TYPE_Q4_1: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_1]; break;
                                case GGML_TYPE_Q2_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q2_K]; break;
                                case GGML_TYPE_Q3_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q3_K]; break;
                                case GGML_TYPE_Q4_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q4_K]; break;
                                case GGML_TYPE_Q5_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q5_K]; break;
                                case GGML_TYPE_Q6_K: [encoder setComputePipelineState:ctx->pipeline_get_rows_q6_K]; break;
                                default: GGML_ASSERT(false && "not implemented");
                            }

                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:2];
                            [encoder setBytes:&(src0->ne[0]) length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&(src0->nb[1]) length:sizeof(uint64_t) atIndex:4];
                            [encoder setBytes:&(dst->nb[1])  length:sizeof(uint64_t) atIndex:5];

                            const int64_t n = ggml_nelements(src1);

                            [encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_RMS_NORM:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            float eps;
                            memcpy(&eps, dst->op_params, sizeof(float));

                            const int nth = 512;

                            [encoder setComputePipelineState:ctx->pipeline_rms_norm];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                            [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
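                            // threadgroup memory holds one partial sum per SIMD group: with
                            // nth = 512 threads and a SIMD-group width of 32 on Apple GPUs,
                            // that is 512/32 = 16 floats (an inference from the sizing below)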
                            [encoder setThreadgroupMemoryLength:nth/32*sizeof(float) atIndex:0];

                            const int64_t nrows = ggml_nrows(src0);

                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_NORM:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const float eps = 1e-5f;

                            const int nth = 256;

                            [encoder setComputePipelineState:ctx->pipeline_norm];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:3];
                            [encoder setBytes:&eps  length:sizeof(   float) atIndex:4];
                            [encoder setThreadgroupMemoryLength:nth*sizeof(float) atIndex:0];

                            const int64_t nrows = ggml_nrows(src0);

                            [encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_ALIBI:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            GGML_ASSERT((src0t == GGML_TYPE_F32));

                            const int n_past = ((int32_t *) dst->op_params)[0]; UNUSED(n_past);
                            const int n_head = ((int32_t *) dst->op_params)[1];

                            float max_bias;
                            memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));

                            if (__builtin_popcount(n_head) != 1) {
                                GGML_ASSERT(false && "only power-of-two n_head implemented");
                            }

                            const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                            const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);

                            [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];
                            [encoder setBytes:&m0   length:sizeof(   float) atIndex:18];

                            const int nth = 32;

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    case GGML_OP_ROPE:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const int n_past = ((int32_t *) dst->op_params)[0];
                            const int n_dims = ((int32_t *) dst->op_params)[1];
                            const int mode   = ((int32_t *) dst->op_params)[2];

                            float freq_base;
                            float freq_scale;
                            memcpy(&freq_base,  (int32_t *) dst->op_params + 4, sizeof(float));
                            memcpy(&freq_scale, (int32_t *) dst->op_params + 5, sizeof(float));

                            [encoder setComputePipelineState:ctx->pipeline_rope];
                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];
                            [encoder setBytes:&n_past     length:sizeof(  int) atIndex:18];
                            [encoder setBytes:&n_dims     length:sizeof(  int) atIndex:19];
                            [encoder setBytes:&mode       length:sizeof(  int) atIndex:20];
                            [encoder setBytes:&freq_base  length:sizeof(float) atIndex:21];
                            [encoder setBytes:&freq_scale length:sizeof(float) atIndex:22];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
                        } break;
                    case GGML_OP_DUP:
                    case GGML_OP_CPY:
                    case GGML_OP_CONT:
                        {
                            if (encoder == nil) {
                                encoder = [command_buffer computeCommandEncoderWithDescriptor: edesc];
                            }

                            const int nth = 32;

                            switch (src0t) {
                                case GGML_TYPE_F32:
                                    {
                                        switch (dstt) {
                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f16]; break;
                                            case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_cpy_f32_f32]; break;
                                            default: GGML_ASSERT(false && "not implemented");
                                        };
                                    } break;
                                case GGML_TYPE_F16:
                                    {
                                        switch (dstt) {
                                            case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_cpy_f16_f16]; break;
                                            case GGML_TYPE_F32: GGML_ASSERT(false && "cpy_f16_f32 not implemented"); break;
                                            default: GGML_ASSERT(false && "not implemented");
                                        };
                                    } break;
                                default: GGML_ASSERT(false && "not implemented");
                            }

                            [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                            [encoder setBuffer:id_dst  offset:offs_dst  atIndex:1];
                            [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
                            [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
                            [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
                            [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
                            [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
                            [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
                            [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
                            [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
                            [encoder setBytes:&ne0  length:sizeof( int64_t) atIndex:10];
                            [encoder setBytes:&ne1  length:sizeof( int64_t) atIndex:11];
                            [encoder setBytes:&ne2  length:sizeof( int64_t) atIndex:12];
                            [encoder setBytes:&ne3  length:sizeof( int64_t) atIndex:13];
                            [encoder setBytes:&nb0  length:sizeof(uint64_t) atIndex:14];
                            [encoder setBytes:&nb1  length:sizeof(uint64_t) atIndex:15];
                            [encoder setBytes:&nb2  length:sizeof(uint64_t) atIndex:16];
                            [encoder setBytes:&nb3  length:sizeof(uint64_t) atIndex:17];

                            [encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
                        } break;
                    default:
                        {
                            fprintf(stderr, "%s: node %3d, op = %8s not implemented\n", __func__, i, ggml_op_name(dst->op));
                            GGML_ASSERT(false);
                        }
                }
            }

            if (encoder != nil) {
                [encoder endEncoding];
                encoder = nil;
            }

            [command_buffer commit];
        });
    }

    // wait for all threads to finish
    dispatch_barrier_sync(queue, ^{});

    [command_buffers[n_cb - 1] waitUntilCompleted];

    // check status of command buffers
    // needed to detect if the device ran out-of-memory for example (#1881)
    for (int i = 0; i < n_cb; i++) {
        MTLCommandBufferStatus status = (MTLCommandBufferStatus) [command_buffers[i] status];
        if (status != MTLCommandBufferStatusCompleted) {
            fprintf(stderr, "%s: command buffer %d failed with status %lu\n", __func__, i, status);
            GGML_ASSERT(false);
        }
    }
  946. }